from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
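# Cross-check sketch: compare the from-scratch fit against scikit-learn's solver
# on the same two iris features. `penalty=None` assumes scikit-learn >= 1.2
# (older releases spell it penalty="none"), and fit_intercept=False matches the
# from-scratch model above, which has no intercept term.
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
x, y = iris.data[:, :2], (iris.target != 0) * 1
clf = LogisticRegression(penalty=None, fit_intercept=False).fit(x, y)
print("sklearn theta:", clf.coef_.ravel())
print("from-scratch theta:", logistic_reg(0.1, x, y, max_iterations=70000))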
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the specified length."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
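# Usage sketch: generate a password and run the strength check. The output is
# random, so short passwords can legitimately fail the check.
pw = password_generator(12)
print(pw, "->", "strong" if is_strong_password(pw) else "weak")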
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
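# Usage sketch for encode_line/trim_batch; the model name is an arbitrary
# example and fetching it requires network access.
from transformers import BartTokenizer

tok = BartTokenizer.from_pretrained("facebook/bart-base")
enc = encode_line(tok, "A short source sentence.", max_length=16, padding_side="right")
trimmed = trim_batch(enc["input_ids"], tok.pad_token_id)
print(enc["input_ids"].shape, "->", trimmed.shape)  # pad-only columns are removed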
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
"""
Project Euler Problem 28: https://projecteuler.net/problem=28

What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral
formed in the same way as the 5 by 5 example, whose diagonal sum is 101?
"""
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
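# Quick checks: the 5x5 example comes from the problem statement; the 1001x1001
# value is the published answer as I recall it, so treat it as an assumption.
assert solution(5) == 101
assert solution(1001) == 669171001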
from __future__ import annotations

from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from an array in O(N)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the original array in O(N)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index` in O(log N)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value` in O(log N)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the prefix [0, right) in O(log N)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the range [left, right) in O(log N)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the element at `index` in O(log N)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index whose prefix sum does not exceed `value`, or -1."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
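# Usage sketch for FenwickTree; ranges are half-open, matching `query`'s
# [left, right) convention, and the expected outputs are hand-computed.
fenwick = FenwickTree([1, 2, 3, 4, 5])
print(fenwick.prefix(3))    # 1 + 2 + 3 = 6
print(fenwick.query(1, 4))  # 2 + 3 + 4 = 9
fenwick.add(2, 10)          # the logical array becomes [1, 2, 13, 4, 5]
print(fenwick.get(2))       # 13
print(fenwick.get_array())  # [1, 2, 13, 4, 5]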
"""
Project Euler Problem 115: https://projecteuler.net/problem=115

Find the least value of n for which the fill-count function F(50, n)
first exceeds one million.
"""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
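# Quick checks taken from the Project Euler 115 problem statement:
# F(3, 30) = 1,089,155 is the first count over one million for m = 3, and
# F(10, 57) = 1,148,904 is the first for m = 10.
assert solution(3) == 30
assert solution(10) == 57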
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
"""Lower-upper (LU) decomposition of a square matrix using the Doolittle scheme."""
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
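# Usage sketch: factor a 3x3 matrix and confirm that L @ U reproduces it.
matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
print(np.allclose(lower @ upper, matrix))  # True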
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self : int , __UpperCamelCase : Dict , __UpperCamelCase : str=1_3 , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : str=True , __UpperCamelCase : int=True , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[int]=9_9 , __UpperCamelCase : Dict=3_2 , __UpperCamelCase : int=5 , __UpperCamelCase : int=4 , __UpperCamelCase : List[Any]=3_7 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Tuple=5_1_2 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : Tuple=3 , __UpperCamelCase : Union[str, Any]=4 , __UpperCamelCase : Dict=None , )->str:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowercase__ ( self : int )->str:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any] )->List[str]:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = BioGptModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , )->Dict:
_UpperCAmelCase = BioGptForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , *__UpperCamelCase : str )->List[str]:
_UpperCAmelCase = BioGptModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# create attention mask
_UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCamelCase )
_UpperCAmelCase = self.seq_length // 2
_UpperCAmelCase = 0
# first forward pass
_UpperCAmelCase , _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_UpperCAmelCase = ids_tensor((1,) , __UpperCamelCase ).item() + 1
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_UpperCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__UpperCamelCase )] , dim=1 , )
# get two different outputs
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )['''last_hidden_state''']
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase , attention_mask=__UpperCamelCase )['''last_hidden_state''']
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , *__UpperCamelCase : Tuple )->Optional[int]:
_UpperCAmelCase = BioGptModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
_UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCamelCase )
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )['''last_hidden_state''']
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[
'''last_hidden_state'''
]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Dict , *__UpperCamelCase : int , __UpperCamelCase : List[Any]=False )->Tuple:
_UpperCAmelCase = BioGptForCausalLM(__UpperCamelCase )
model.to(__UpperCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowercase__ ( self : str , __UpperCamelCase : Any , *__UpperCamelCase : List[Any] )->int:
_UpperCAmelCase = BioGptModel(__UpperCamelCase )
_UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , *__UpperCamelCase : Any )->Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = BioGptForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : str )->int:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase__ = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = BioGptModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[str] )->int:
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] )->str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__UpperCamelCase )
def lowercase__ ( self : Tuple )->int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__UpperCamelCase , gradient_checkpointing=__UpperCamelCase )
def lowercase__ ( self : Optional[int] )->List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : int )->Union[str, Any]:
_UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__UpperCamelCase )
_UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_UpperCAmelCase = '''left'''
# Define PAD Token = EOS Token = 50256
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = model.config.eos_token_id
# use different length sentences to test batching
_UpperCAmelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors='''pt''' , padding=__UpperCamelCase )
_UpperCAmelCase = inputs['''input_ids'''].to(__UpperCamelCase )
_UpperCAmelCase = model.generate(
input_ids=__UpperCamelCase , attention_mask=inputs['''attention_mask'''].to(__UpperCamelCase ) , )
_UpperCAmelCase = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids=__UpperCamelCase )
_UpperCAmelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
_UpperCAmelCase = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids=__UpperCamelCase , max_length=model.config.max_length - num_paddings )
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase )
_UpperCAmelCase = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def lowercase__ ( self : Dict )->List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = BioGptModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase__ ( self : int )->str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = BioGptForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str )->Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = '''multi_label_classification'''
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase )
_UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase = BioGptForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
_UpperCAmelCase = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = 4_2_3_8_4
_UpperCAmelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : str )->Optional[Any]:
_UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__UpperCamelCase )
torch.manual_seed(0 )
_UpperCAmelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__UpperCamelCase )
_UpperCAmelCase = model.generate(
**__UpperCamelCase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=__UpperCamelCase , )
_UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCamelCase )
_UpperCAmelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
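# Usage sketch (the function name above is a reconstruction from the
# surrounding code):
print(binary_count_setbits(25))  # 25 == 0b11001 -> 3
print(binary_count_setbits(0))   # no set bits -> 0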
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """
    Configuration class for an Informer time-series model.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
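# Construction sketch, assuming the released transformers package is installed
# (the parameter values are arbitrary examples).
from transformers import InformerConfig

config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
print(config.d_model, config.context_length)  # 64 48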
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """
    Configuration class for an X-MOD model.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
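# Construction sketch, assuming the released transformers package is installed
# (the language codes and vocab size are arbitrary examples).
from transformers import XmodConfig

config = XmodConfig(vocab_size=32000, languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.adapter_reduction_factor, config.languages)  # 2 ['en_XX', 'de_DE']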
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=2 , ):
'''simple docstring'''
lowercase__ : List[Any]= parent
lowercase__ : Optional[int]= batch_size
lowercase__ : List[Any]= image_size
lowercase__ : Tuple= patch_size
lowercase__ : str= num_channels
lowercase__ : Union[str, Any]= is_training
lowercase__ : List[str]= use_labels
lowercase__ : List[Any]= hidden_size
lowercase__ : Dict= num_hidden_layers
lowercase__ : Any= num_attention_heads
lowercase__ : str= intermediate_size
lowercase__ : int= hidden_act
lowercase__ : str= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : Tuple= type_sequence_label_size
lowercase__ : Dict= initializer_range
lowercase__ : Optional[Any]= scope
lowercase__ : int= encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase__ : int= (image_size // patch_size) ** 2
lowercase__ : List[Any]= num_patches + 2
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Any= None
if self.use_labels:
lowercase__ : List[str]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : int= self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : str= TFDeiTModel(config=snake_case__ )
lowercase__ : Dict= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= TFDeiTForMaskedImageModeling(config=snake_case__ )
lowercase__ : Tuple= model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : List[Any]= 1
lowercase__ : List[Any]= TFDeiTForMaskedImageModeling(snake_case__ )
lowercase__ : Tuple= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Tuple= model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.type_sequence_label_size
lowercase__ : int= TFDeiTForImageClassification(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : Optional[Any]= 1
lowercase__ : List[str]= TFDeiTForImageClassification(snake_case__ )
lowercase__ : List[str]= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : int= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.prepare_config_and_inputs()
lowercase__ : Optional[Any]= config_and_inputs
lowercase__ : Union[str, Any]= {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
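
# A hedged, self-contained sketch of the same inference flow outside the test
# harness. The checkpoint is the one exercised above; the image path is
# illustrative and network access to the Hub is assumed.
if __name__ == "__main__":
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    logits = model(**processor(images=image, return_tensors="tf")).logits
    print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])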
| 356 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def lowercase__(A , A ) ->str:
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowercase__ : Union[str, Any]= "pretraining"
if "vcr" in checkpoint_path:
lowercase__ : str= {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
lowercase__ : Optional[Any]= {"visual_embedding_dim": 2_048}
elif "vqa" in checkpoint_path:
lowercase__ : int= {"visual_embedding_dim": 2_048}
elif "nlvr" in checkpoint_path:
lowercase__ : Tuple= {"visual_embedding_dim": 1_024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowercase__ : int= {"visual_embedding_dim": 512}
lowercase__ : int= "multichoice"
elif "vqa_advanced" in checkpoint_path:
lowercase__ : Dict= {"visual_embedding_dim": 2_048}
lowercase__ : Optional[Any]= "vqa_advanced"
elif "vqa" in checkpoint_path:
lowercase__ : Optional[int]= {"visual_embedding_dim": 2_048, "num_labels": 3_129}
lowercase__ : List[str]= "vqa"
elif "nlvr" in checkpoint_path:
lowercase__ : Dict= {
"visual_embedding_dim": 1_024,
"num_labels": 2,
}
lowercase__ : Any= "nlvr"
lowercase__ : List[Any]= VisualBertConfig(**A )
# Load State Dict
lowercase__ : Union[str, Any]= load_state_dict(A )
lowercase__ : List[str]= get_new_dict(A , A )
if model_type == "pretraining":
lowercase__ : Optional[Any]= VisualBertForPreTraining(A )
elif model_type == "vqa":
lowercase__ : Any= VisualBertForQuestionAnswering(A )
elif model_type == "nlvr":
lowercase__ : Union[str, Any]= VisualBertForVisualReasoning(A )
elif model_type == "multichoice":
lowercase__ : str= VisualBertForMultipleChoice(A )
model.load_state_dict(A )
# Save Checkpoints
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
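
# Example invocation (assumptions: this script is saved as convert_visual_bert.py
# and an original checkpoint such as vqa_fine_tuned.th has been downloaded locally):
#
#   python convert_visual_bert.py vqa_fine_tuned.th ./visual_bert_vqa
#
# The checkpoint filename drives both the config ("vqa" selects
# visual_embedding_dim=2048 with 3129 labels) and the head class
# (VisualBertForQuestionAnswering).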
| 150 | 0 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
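
# A short demonstration of the Newton-Raphson update implemented above,
# x_{n+1} = x_n - f(x_n)/f'(x_n) with f(x) = x^2 - a, checked against math.sqrt.
# The sample values are arbitrary.
if __name__ == "__main__":
    for a in (2.0, 10.0, 144.0):
        print(f"sqrt({a}) ~ {square_root_iterative(a)} (math.sqrt: {math.sqrt(a)})")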
| 47 | def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
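
# Worked examples for the DP above: dp[i][j] is True when the first i characters
# of `a` can yield the first j characters of `b` by upper-casing some lowercase
# letters and deleting the remaining lowercase ones. The sample strings are arbitrary.
if __name__ == "__main__":
    print(abbr("daBcd", "ABC"))  # True: upper-case 'a' and 'c', delete the trailing 'd's
    print(abbr("dBcd", "ABC"))   # False: there is no 'a' to turn into 'A'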
| 182 | 0 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
a_ = """\
BLEURT is a learned evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
a_ = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/google-research/bleurt", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/google-research/bleurt"], reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"])
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 291 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface to the files of a Hugging Face dataset repository (legacy)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs
    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = "rb" , **__lowerCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __lowerCamelCase ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__A : Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCamelCase , mode=__lowerCamelCase , headers=get_authentication_headers_for_url(__lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
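
# A hedged usage sketch: listing the top-level files of a Hub dataset through
# this legacy filesystem. The dataset id "squad" is illustrative and network
# access is required to fetch the DatasetInfo.
if __name__ == "__main__":
    from huggingface_hub import HfApi

    info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=info)
    print(fs.ls(""))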
| 291 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
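
# A hedged illustration of what the _LazyModule indirection buys: importing the
# package is cheap, and the torch-backed submodule is only loaded when one of
# its attributes is first accessed. The import path is illustrative.
if __name__ == "__main__":
    from transformers.models.table_transformer import TableTransformerConfig  # triggers the lazy import
    print(TableTransformerConfig().model_type)  # "table-transformer"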
| 229 | '''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None))
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None))
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)])

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0]))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states)
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """, RESNET_START_DOCSTRING)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity())
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """, RESNET_START_DOCSTRING)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
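
# A minimal shape check for the classes above: random weights, no checkpoint
# download. The default ResNetConfig mirrors ResNet-50 (bottleneck layers,
# hidden sizes up to 2048), so a 224x224 input yields a 2048x7x7 feature map.
if __name__ == "__main__":
    config = ResNetConfig()
    model = ResNetModel(config)
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])
    print(outputs.pooler_output.shape)      # torch.Size([1, 2048, 1, 1])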
| 229 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)
            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class})
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
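
# A worked instance of the sequence-length arithmetic used throughout these
# tests: with the 224/16 ViT-MAE defaults and mask_ratio 0.75 the encoder only
# sees ceil((1 - 0.75) * (196 + 1)) = 50 tokens (the +1 is the [CLS] token).
if __name__ == "__main__":
    image_size, patch_size, mask_ratio = 224, 16, 0.75
    num_patches = (image_size // patch_size) ** 2
    print(num_patches)  # 196
    print(int(math.ceil((1 - mask_ratio) * (num_patches + 1))))  # 50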
| 358 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 107 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 166 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
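
# A hedged construction example for the config above; the overridden value is
# arbitrary and every other field keeps its default.
if __name__ == "__main__":
    config = MgpstrConfig(max_token_length=27)
    print(config.model_type, config.hidden_size)  # mgp-str 768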
| 166 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 254 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup( self ):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )

    def encode( self , text , labels ):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"This example is {label}" for label in labels] , return_tensors='pt' , padding='max_length' , )

    def decode( self , outputs ):
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
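# Hedged usage sketch (added for illustration; assumes an agents-style runtime that
# calls setup/encode/decode as defined above):
#
#   tool = TextClassificationTool()
#   tool.setup()
#   inputs = tool.encode("This movie was great", ["positive", "negative"])
#   print(tool.decode(tool.model(**inputs)))  # -> most likely label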
| 254 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
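        # Worked example with the defaults above (added for clarity):
        #   num_patches = (30 // 2) ** 2 = 225
        #   seq_length  = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91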
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_keyword_and_dict_args( self ):
        # make the mask reproducible
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs_dict = model(inputs , noise=noise )
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            outputs_keywords = model(**inputs_keywords , noise=noise )
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
    def test_numpy_arrays_inputs( self ):
        # make the mask reproducible
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        def prepare_numpy_arrays(inputs_dict ):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v ):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v )
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            inputs_np = prepare_numpy_arrays(inputs )
            output_for_dict_input = model(inputs_np , noise=noise )
            output_for_kw_input = model(**inputs_np , noise=noise )
            self.assert_outputs_same(output_for_dict_input , output_for_kw_input )
    def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ):
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict['noise'] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
    def test_keras_save_load( self ):
        # make mask reproducible
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(module )
            if module_member_name.endswith('MainLayer' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
            for module_member in (getattr(module , module_member_name ),)
            if isinstance(module_member , type )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member , '_keras_serializable' , False )
        }
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        noise = tf.convert_to_tensor(noise )
        inputs_dict.update({'noise': noise} )
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config )
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
            outputs = model(inputs_dict )
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname , 'keras_model.h5' )
                model.save(filepath )
                model = tf.keras.models.load_model(
                    filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(model , tf.keras.Model )
                after_outputs = model(inputs_dict )
                self.assert_outputs_same(after_outputs , outputs )
    @slow
    def test_save_load( self ):
        # make mask reproducible
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_input = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_input , noise=noise )
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2 )] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=False )
                model = model_class.from_pretrained(tmpdirname )
                after_outputs = model(model_input , noise=noise )
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs['last_hidden_state'].numpy()
                    out_1[np.isnan(out_1 )] = 0
                else:
                    out_1 = after_outputs['logits'].numpy()
                    out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1e-5 )
    def test_save_load_config( self ):
        # make mask reproducible
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_input = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_input , noise=noise )
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config )
            new_model = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config )
            _ = new_model(model_input )  # Build model
            new_model.set_weights(model.get_weights() )
            after_outputs = new_model(model_input , noise=noise )
            self.assert_outputs_same(after_outputs , outputs )
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load '
        'to get deterministic results.' )
    def test_determinism( self ):
        pass
    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def test_model_outputs_equivalence( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        model = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
    @slow
    def test_inference_for_pretraining( self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
| 8 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum ):
    '''simple docstring'''
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    '''simple docstring'''
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
        """simple docstring"""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , return_commit_hash=True , **kwargs , )
        return cls.from_config(config , return_unused_kwargs=return_unused_kwargs , **kwargs )

    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
        """simple docstring"""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )

    @property
    def compatibles( self ):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles( cls ):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('.' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
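# Hedged usage sketch (assumption: `MyScheduler` is a hypothetical SchedulerMixin
# subclass that lists sibling scheduler class names in `_compatibles`):
#
#   scheduler = MyScheduler.from_pretrained("some/checkpoint", subfolder="scheduler")
#   print(scheduler.compatibles)  # -> the resolved compatible scheduler classes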
| 140 | 0 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
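# Hedged usage sketch (illustrative menu values, not from the original module):
def _demo_greedy():
    names = ["Burger", "Pizza", "Coca Cola"]
    values = [80.0, 100.0, 60.0]
    weights = [40.0, 10.0, 20.0]
    menu = build_menu(names, values, weights)
    # pick greedily by value until the 60.0 weight budget is exhausted
    chosen, total_value = greedy(menu, 60.0, Things.get_value)
    print(chosen, total_value)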
def test_greedy_programs():  # placeholder stub kept from the source; name is illustrative
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_compatible( self ):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_compatible( self ):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_not_compatible( self ):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_all_is_compatible_variant( self ):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
| 39 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
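# Hedged example (added for illustration): a tiny valid tree and an invalid one.
def _demo_is_binary_search_tree():
    valid = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
    assert is_binary_search_tree(valid)
    invalid = TreeNode(1.0, left=TreeNode(2.0))  # left child larger than its parent
    assert not is_binary_search_tree(invalid)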
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 280 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None,):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings,)
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels,):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3)

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, attention_mask, lm_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 39 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
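# Worked example (follows directly from the tables above):
#   convert_speed(100, "km/h", "m/s")
#   = round(100 * speed_chart["km/h"] * speed_chart_inverse["m/s"], 3)
#   = round(100 * 1.0 * 0.277777778, 3) = 27.778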
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC ):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self ,input_ids ,scores ,**kwargs ):
        '''simple docstring'''
        raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class MaxLengthCriteria(StoppingCriteria ):
    def __init__( self ,max_length ,max_position_embeddings = None ):
        '''simple docstring'''
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self ,input_ids ,scores ,**kwargs ):
        '''simple docstring'''
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                """This is a friendly reminder - the current text generation call will exceed the model's predefined """
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                """exceptions, performance degradation, or nothing at all.""" )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria ):
    def __init__( self ,start_length ,max_new_tokens ):
        '''simple docstring'''
        warnings.warn(
            """The class `MaxNewTokensCriteria` is deprecated. """
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            """with `max_length = start_length + max_new_tokens` instead.""" ,FutureWarning ,)
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self ,input_ids ,scores ,**kwargs ):
        '''simple docstring'''
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria ):
    def __init__( self ,max_time ,initial_timestamp = None ):
        '''simple docstring'''
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self ,input_ids ,scores ,**kwargs ):
        '''simple docstring'''
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list ):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self ,input_ids ,scores ,**kwargs ):
        '''simple docstring'''
        return any(criteria(input_ids ,scores ) for criteria in self )

    @property
    def max_length( self ):
        '''simple docstring'''
        for stopping_criterium in self:
            if isinstance(stopping_criterium ,MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium ,MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList , max_length: int ) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
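# Hedged usage sketch (assumption: `input_ids`/`scores` come from a generation loop):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   if criteria(input_ids, scores):
#       ...  # stop generating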
| 20 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config (model_name : str ):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys (config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key (dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v (state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            # (target keys reconstructed to match the rename pattern used above)
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
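# Worked illustration (added for clarity; `in_proj_weight` is the fused tensor popped
# above): a fused qkv projection of shape (3 * dim, dim) splits into three (dim, dim)
# blocks, in query / key / value order:
#
#   q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]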
def read_in_decoder_q_k_v (state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        # (target keys reconstructed to match the decoder layer naming used above)
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img ():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint (model_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , 'rb' ) as f:
        data = pickle.load(f )
    state_dict = data['model']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='pt' )
    outputs = model(**inputs )
    print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and image processor to the hub...' )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 277 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self ,dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files ,(str, list, tuple) ):
            files = data_files
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name ,gen_kwargs={'files': files} ) )
        return splits

    def _cast_table( self ,pa_table: pa.Table ):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table ,self.config.features.arrow_schema )
        return pa_table

    def _generate_tables( self ,files ):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file ,'rb' ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
                yield i, self._cast_table(pa_table )
| 236 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 236 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 58 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dest_dtype = None
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
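

if __name__ == "__main__":
    # Rough round-trip sketch of the feature above (assumes Pillow is installed;
    # the array is arbitrary illustrative data, not part of the original module).
    arr = np.zeros((4, 4, 3), dtype=np.uint8)
    feature = Image()
    encoded = feature.encode_example(arr)  # -> {"path": None, "bytes": b"..."}
    decoded = feature.decode_example(encoded)
    print(type(decoded).__name__, decoded.size)  # Image (4, 4)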
| 58 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
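

# Wiring sketch (hedged: `post_processing_function` and `compute_metrics` stand in
# for the helpers defined in the original question-answering example script and are
# not defined in this module):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()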
| 189 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
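

# Worked example: with the default scale_factor=8, a 768x768 request maps to
# 768 // 8**2 = 12 with no remainder, so the function returns (96, 96) --
# the latent grid size times the VQ scale factor.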
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet conditioning.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 189 | 1 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
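

# Sanity sketch (assumption: these are the DeepFloyd IF sampling schedules,
# which is why the names above were restored that way): every schedule is
# strictly decreasing and ends at timestep 0.
if __name__ == "__main__":
    for ts in (
        fast27_timesteps, smart27_timesteps, smart50_timesteps, smart100_timesteps,
        smart185_timesteps, super27_timesteps, super40_timesteps, super100_timesteps,
    ):
        assert all(a > b for a, b in zip(ts, ts[1:])) and ts[-1] == 0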
| 200 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 200 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 51 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
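
    # Example invocation (the script file name and output directory are illustrative):
    #   python convert_dit_unilm_to_pytorch.py \
    #       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
    #       --pytorch_dump_folder_path ./dit-base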
| 51 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
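

def _check_key_pair(public_key: tuple[int, int], private_key: tuple[int, int]) -> bool:
    """Sanity-check sketch (not part of the original module): an RSA
    encrypt/decrypt round-trip on a small message, using the identity
    pow(pow(m, e, n), d, n) == m for gcd(e, (p-1)*(q-1)) == 1."""
    n, e = public_key
    _, d = private_key
    message = 42
    return pow(pow(message, e, n), d, n) == message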
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
| 228 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
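
    # Typical invocation (paths and script arguments are illustrative):
    #   python xla_spawn.py --num_cores 8 path/to/train_script.py --model_name_or_path bert-base-cased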
| 228 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Any:
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """make sure you have the latest version of `bitsandbytes` installed.""" )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        torch_dtype = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    param_name = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                    param = getattr(model , param_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(torch_dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
            logger.info(
                f"""The model device type is {model_device.type}. However, cuda is needed for quantization. """
                """We move the model to cuda.""" )
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Optional[Any]:
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"""""": torch.cuda.current_device()}
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized. """ """Setting device_map to `{\'\':torch.cuda.current_device()}`.""" )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                """If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or """
                """\'sequential\'.""" )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs["""special_dtypes"""] = special_dtypes
        kwargs["""no_split_module_classes"""] = no_split_module_classes
        kwargs["""dtype"""] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == """balanced_low_0""") , max_memory=max_memory , **kwargs , )
        kwargs["""max_memory"""] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
                    logger.info(
                        """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Optional[int]:
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Union[str, Any]:
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = """.""".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ) -> int:
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , """base_model_prefix""" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ) -> Dict:
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ) -> Dict:
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> List[str]:
    # if the param is not yet quantized, quantize it and offload weights plus SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(""".""" )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , """SCB""" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , """meta""" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 369 |
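A hedged usage sketch for the entry point above (load_and_quantize_model in accelerate); the model name and weights path are placeholders, and a recent accelerate install is assumed:

# Quantize an empty-weight model from a checkpoint folder (sketch).
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")      # placeholder model
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True)
model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",                    # folder with the weights
    device_map="auto",
)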
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_A = logging.getLogger(__name__)
class NERTransformer( BaseTransformer ):
    mode = "token-classification"
    def __init__( self : Dict , hparams : Any ) -> Optional[int]:
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("""tasks""" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ) -> str:
        """simple docstring"""
        return self.model(**inputs )
    def training_step( self , batch , batch_num ) -> Dict:
        """simple docstring"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ) -> str:
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=bool(self.config.model_type in ["""roberta"""] ) , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("""Saving features into cached file %s""" , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode : int , batch_size : int , shuffle : bool = False ) -> DataLoader:
        """simple docstring"""
        cached_features_file = self._feature_file(mode )
        logger.info("""Loading features from cached file %s""" , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self , batch , batch_nb ) -> List[str]:
        """Compute validation"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ) -> Tuple:
        """simple docstring"""
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(out_label_list , preds_list ),
            """precision""": precision_score(out_label_list , preds_list ),
            """recall""": recall_score(out_label_list , preds_list ),
            """f1""": f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret["""log"""] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ) -> Any:
        """simple docstring"""
        # when stable
        ret , preds , targets = self._eval_end(outputs )
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ) -> Optional[Any]:
        """simple docstring"""
        # updating to test_epoch_end instead of deprecated test_end
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser , root_dir ) -> List[str]:
        """simple docstring"""
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=str , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""" , default=1_28 , type=int , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--labels""" , default="""""" , type=str , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=int , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser
if __name__ == "__main__":
_A = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_A = NERTransformer.add_model_specific_args(parser, os.getcwd())
_A = parser.parse_args()
_A = NERTransformer(args)
_A = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_A = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
_A = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 212 | 0 |
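The core of _eval_end above is reconstructing tag sequences from padded label ids; a standalone sketch of that loop with a hypothetical three-tag label map:

# Padded positions (pad_token_label_id) are skipped when rebuilding tags.
import numpy as np

pad_token_label_id = -100                      # CrossEntropyLoss().ignore_index
label_map = {0: "O", 1: "B-PER", 2: "I-PER"}   # hypothetical label set

out_label_ids = np.array([[0, 1, 2, pad_token_label_id]])
preds = np.array([[0, 1, 1, 2]])               # hypothetical argmax over logits

out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:
            out_label_list[i].append(label_map[out_label_ids[i, j]])
            preds_list[i].append(label_map[preds[i, j]])

print(out_label_list)  # [['O', 'B-PER', 'I-PER']]
print(preds_list)      # [['O', 'B-PER', 'B-PER']]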
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class BartphoTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def _lowerCAmelCase ( self ):
super().setUp()
        vocab = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
        with open(self.monolingual_vocab_file, """w""", encoding="""utf-8""" ) as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''' )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self, **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ):
        input_text = """This is a là test"""
        output_text = """This is a<unk><unk> test"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map )
        text = """This is a là test"""
        bpe_tokens = """▁This ▁is ▁a ▁l à ▁t est""".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
| 116 |
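The setUp above writes BARTpho's monolingual vocab as one whitespace-separated "token index" pair per line; a small standalone sketch of writing and reading that format:

# Write and re-read the monolingual vocab format used by the test.
vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
with open("monolingual_vocab.txt", "w", encoding="utf-8") as fp:
    for idx, token in enumerate(vocab):
        fp.write(f"{token} {idx}\n")

with open("monolingual_vocab.txt", encoding="utf-8") as fp:
    loaded = dict(line.split() for line in fp)
assert loaded["▁This"] == "0"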
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config( self, **kwargs ):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self, **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample, eta ).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500] ):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps )
    def test_eta( self ):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t, eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1, per_sample_batch )
        residual = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 116 | 1 |
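A minimal denoising loop mirroring full_loop above; the "model" is a zero-residual stand-in, so this only exercises the scheduler API, not a real network:

# Drive DDIMParallelScheduler through its public step interface.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_start=0.0001,
                                  beta_end=0.02, beta_schedule="linear", clip_sample=True)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)          # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample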
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360 |
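The else branch above wires the standard lazy-module pattern: nothing heavy is imported until an attribute on the package is first touched. A small sketch of the observable effect (assumes transformers is installed):

# Attribute access on the lazily wrapped package triggers the real import.
import transformers

cls = transformers.RemBertConfig        # resolved on first access via _LazyModule
print(cls.__name__)                     # "RemBertConfig"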
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__( self , parent , batch_size=1_4 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
        """simple docstring"""
        max_decoder_length = 2_0
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
        """simple docstring"""
        max_decoder_length = 2_0
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxGPTJModelTest( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
    def test_batch_generation( self ):
"""simple docstring"""
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase = getattr(_A , _A )
__lowerCAmelCase , __lowerCAmelCase = pt_inputs["input_ids"].shape
__lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = pt_model_class(_A ).eval()
                __lowerCAmelCase = model_class(_A , dtype=jnp.float32 )
__lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
__lowerCAmelCase = fx_state
with torch.no_grad():
__lowerCAmelCase = pt_model(**_A ).to_tuple()
__lowerCAmelCase = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_A )
__lowerCAmelCase = model_class.from_pretrained(_A , from_pt=_A )
__lowerCAmelCase = fx_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase = getattr(_A , _A )
__lowerCAmelCase = pt_model_class(_A ).eval()
                __lowerCAmelCase = model_class(_A , dtype=jnp.float32 )
__lowerCAmelCase = load_flax_weights_in_pytorch_model(_A , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase = pt_inputs["input_ids"].shape
__lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 0
__lowerCAmelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase = pt_model(**_A ).to_tuple()
__lowerCAmelCase = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_A )
__lowerCAmelCase = pt_model_class.from_pretrained(_A , from_flax=_A )
with torch.no_grad():
__lowerCAmelCase = pt_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 102 | 0 |
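The two equivalence tests above reduce to one numeric check: the same inputs pushed through the PyTorch and Flax models must give near-identical last-position logits. A standalone sketch of that comparison, with hypothetical toy logits:

# Compare two logits arrays the way the equivalence tests do.
import numpy as np

def assert_almost_equals(a: np.ndarray, b: np.ndarray, tol: float) -> None:
    diff = np.abs(a - b).max()
    assert diff <= tol, f"difference {diff} exceeds tolerance {tol}"

fx_logits = np.array([[0.10, 0.20, 0.30]])
pt_logits = np.array([[0.10, 0.20, 0.30 + 1e-3]])
assert_almost_equals(fx_logits[:, -1], pt_logits[:, -1], 4e-2)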
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Union[str, Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 246 |
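The try/except blocks above all follow one guard pattern: probe for an optional backend and register its symbols only when the probe succeeds. The same idea in plain form, without the helper exceptions:

# Optional-dependency guard without the _LazyModule machinery.
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

extras = ["DistilBertModel"] if _torch_available else []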
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000 ) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 246 | 1 |
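solution() scans every denominator and keeps the best fraction strictly below numerator/denominator (Project Euler 71 style). A brute-force cross-check with fractions.Fraction for a tiny limit; the known small-case answer for 3/7 with d <= 8 is 2/5:

# Exhaustive check of solution() on a tiny search space.
from fractions import Fraction

def brute_force(numerator: int = 3, denominator: int = 7, limit: int = 8) -> int:
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            candidate = Fraction(n, d)
            if best < candidate < target:
                best = candidate
    return best.numerator

assert brute_force(limit=8) == solution(numerator=3, denominator=7, limit=8) == 2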
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    # config field names follow DPTConfig; values come from the checkpoint variants
    config = DPTConfig(embedding_type='hybrid' )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
UpperCAmelCase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
UpperCAmelCase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
UpperCAmelCase = name.replace('proj' , 'projection' )
if "blocks" in name:
UpperCAmelCase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
UpperCAmelCase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
UpperCAmelCase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
UpperCAmelCase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
UpperCAmelCase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
UpperCAmelCase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
UpperCAmelCase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
UpperCAmelCase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
UpperCAmelCase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
UpperCAmelCase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
UpperCAmelCase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
UpperCAmelCase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
UpperCAmelCase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
UpperCAmelCase = name.replace('bn' , 'batch_norm' )
if "head" in name:
UpperCAmelCase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
UpperCAmelCase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
UpperCAmelCase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
UpperCAmelCase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
UpperCAmelCase = name.replace('..' , '.' )
if "stem.conv" in name:
UpperCAmelCase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
UpperCAmelCase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
UpperCAmelCase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
UpperCAmelCase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
UpperCAmelCase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v(state_dict , config ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
UpperCAmelCase , UpperCAmelCase = get_dpt_config(lowercase_ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase = torch.load(lowercase_ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(lowercase_ )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase = state_dict.pop(lowercase_ )
UpperCAmelCase = val
# read in qkv matrices
read_in_q_k_v(lowercase_ , lowercase_ )
# load HuggingFace model
UpperCAmelCase = DPTForSemanticSegmentation(lowercase_ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# Check outputs on an image
UpperCAmelCase = 480 if 'ade' in checkpoint_url else 384
UpperCAmelCase = DPTImageProcessor(size=lowercase_ )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(lowercase_ , return_tensors='pt' )
# forward pass
UpperCAmelCase = model(**lowercase_ ).logits if 'ade' in checkpoint_url else model(**lowercase_ ).predicted_depth
if show_prediction:
UpperCAmelCase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=lowercase_ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case_ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 181 |
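A minimal, self-contained sketch of the fused-qkv split that read_in_q_k_v above performs per layer; the key names and hidden size here are illustrative stand-ins, not the real DPT checkpoint layout.

import torch

hidden_size = 4
fused_qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]
fused_qkv_bias = torch.randn(3 * hidden_size)

state_dict = {
    "query.weight": fused_qkv_weight[:hidden_size, :],
    "query.bias": fused_qkv_bias[:hidden_size],
    "key.weight": fused_qkv_weight[hidden_size : hidden_size * 2, :],
    "key.bias": fused_qkv_bias[hidden_size : hidden_size * 2],
    "value.weight": fused_qkv_weight[-hidden_size:, :],
    "value.bias": fused_qkv_bias[-hidden_size:],
}
assert all(v.shape[0] == hidden_size for v in state_dict.values())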
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
UpperCAmelCase = 1
UpperCAmelCase = 3
UpperCAmelCase = (32, 32)
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ )
return image
@property
def UpperCAmelCase__ ( self :List[Any] ) -> Tuple:
torch.manual_seed(0 )
        UpperCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowercase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCAmelCase__ ( self :str ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase__ ( self :str ) -> Union[str, Any]:
UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet_upscale
UpperCAmelCase = DDPMScheduler()
UpperCAmelCase = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase = Image.fromarray(np.uint8(lowercase_ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=3_50 , )
UpperCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase = 'A painting of a squirrel eating a burger'
UpperCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase = output.images
UpperCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=lowercase_ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self :Optional[Any] ) -> Any:
UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet_upscale
UpperCAmelCase = DDPMScheduler()
UpperCAmelCase = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase = Image.fromarray(np.uint8(lowercase_ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=3_50 , )
UpperCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase = 'A painting of a squirrel eating a burger'
UpperCAmelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase = output.images
assert image.shape[0] == 2
UpperCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
UpperCAmelCase = self.dummy_cond_unet_upscale
UpperCAmelCase = DDPMScheduler()
UpperCAmelCase = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase = Image.fromarray(np.uint8(lowercase_ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase = unet.half()
UpperCAmelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=3_50 , )
UpperCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase = 'A painting of a squirrel eating a burger'
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , ).images
UpperCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Dict ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self :Optional[int] ) -> int:
UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
UpperCAmelCase = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase = 'a cat sitting on a park bench'
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Tuple:
UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
UpperCAmelCase = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
            lowercase_ , torch_dtype=torch.float16 , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase = 'a cat sitting on a park bench'
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self :Any ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
            lowercase_ , torch_dtype=torch.float16 , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = 'a cat sitting on a park bench'
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=5 , output_type='np' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 181 | 1 |
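The upscale tests above pin every sampling call to a seeded torch.Generator so that output slices can be compared against hard-coded values; the determinism pattern in isolation, on toy tensors with no pipeline required:

import torch

def sample(generator: torch.Generator) -> torch.Tensor:
    return torch.randn(3, generator=generator)

first = sample(torch.Generator().manual_seed(0))
second = sample(torch.Generator().manual_seed(0))
assert torch.allclose(first, second)  # identical seeds give identical draws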
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=2 , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = scope
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : List[Any] = num_patches + 2
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, labels
def __magic_name__ (self ) -> str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DeiTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[Any] = DeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = DeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__UpperCamelCase : List[str] = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Tuple = False
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
pass
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE__ ),
*get_values(SCREAMING_SNAKE_CASE__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : Dict = problem_type["""title"""]
SCREAMING_SNAKE_CASE__ : List[Any] = problem_type["""num_labels"""]
SCREAMING_SNAKE_CASE__ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : List[str] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE__ ) as warning_list:
SCREAMING_SNAKE_CASE__ : List[Any] = model(**SCREAMING_SNAKE_CASE__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : str = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
SCREAMING_SNAKE_CASE__ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(SCREAMING_SNAKE_CASE__ )
| 25 |
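The DeiT model tester above derives its sequence length from the patch grid; the bookkeeping in isolation, using the tester's default sizes (one token per patch, plus [CLS] and distillation tokens):

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 2  # + [CLS] + distillation token
assert seq_length == 227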
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, """utils""", """tf_ops""", """onnx.json""")) as f:
        onnx_opsets = json.load(f)["""opsets"""]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, """rb""") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''')
        print(*incompatible_ops, sep="""\n""")
    else:
        print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 25 | 1 |
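The compatibility check above reduces to a set difference between the ops found in the graph and the union of ONNX-supported and internal ops; a sketch with made-up op names:

onnx_ops = {"MatMul", "Add", "Relu"}
internal_ops = {"ReadVariableOp", "AssignVariableOp"}
model_op_names = {"MatMul", "Relu", "ReadVariableOp", "FancyCustomOp"}

incompatible_ops = sorted(model_op_names - onnx_ops - internal_ops)
assert incompatible_ops == ["FancyCustomOp"]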
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return (base ** exponent) % modulo_value, by exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
| 358 |
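A quick cross-check of the _modexpt defined above (assuming it is in scope) against Python's built-in three-argument pow, which computes modular exponentiation natively; bases are kept below the modulus because _modexpt returns base unreduced when exponent == 1:

for base, exponent, modulus in [(2, 10, 1000), (3, 2, 7), (1777, 123, 10**8)]:
    assert _modexpt(base, exponent, modulus) == pow(base, exponent, modulus)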
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
def _UpperCAmelCase ( self ) -> str:
_a = self.get_tokenizer()
_a = BarkProcessor(tokenizer=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_a = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _UpperCAmelCase ( self ) -> str:
_a = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len)),
        }
# test providing already loaded voice_preset
_a = processor(text=self.input_string , voice_preset=__UpperCAmelCase )
_a = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_a = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(__UpperCAmelCase , **__UpperCAmelCase )
_a = processor(text=self.input_string , voice_preset=__UpperCAmelCase )
_a = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_a = processor(text=self.input_string , voice_preset=self.voice_preset )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.get_tokenizer()
_a = BarkProcessor(tokenizer=__UpperCAmelCase )
_a = processor(text=self.input_string )
_a = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , )
for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 153 | 0 |
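The voice-preset round-trip the Bark test relies on is plain numpy .npz serialization; a standalone sketch with the same array shapes:

import os
import tempfile

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "file.npz")
    np.savez(path, **voice_preset)
    loaded = np.load(path)
    for key in voice_preset:
        assert np.array_equal(voice_preset[key], loaded[key])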
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("""the value of input must be a natural number""")
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265 |
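Sanity checks for the dynamic program above (using the minimum_squares_to_represent_a_number name from the repaired definition): perfect squares need exactly one square, and by Lagrange's four-square theorem no natural number needs more than four.

assert minimum_squares_to_represent_a_number(9) == 1   # 9
assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
assert all(1 <= minimum_squares_to_represent_a_number(n) <= 4 for n in range(1, 50))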
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase : Tuple = mock.Mock()
UpperCAmelCase : List[str] = 500
UpperCAmelCase : Any = {}
UpperCAmelCase : List[str] = HTTPError
UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowercase( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def _lowercase( self ) -> Union[str, Any]:
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def _lowercase( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 265 | 1 |
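The offline-fallback test above hinges on patching requests at the session level; the mocking pattern in isolation, with no model download involved:

import unittest.mock as mock

import requests

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock) as mock_request:
    resp = requests.Session().request("HEAD", "https://example.invalid")
    mock_request.assert_called()
assert resp.status_code == 500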
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 367 |
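A stripped-down version of the lazy-import idea behind _LazyModule, using PEP 562 module-level __getattr__; this sketch stands alone when saved as a module, and the attribute-to-module mapping below is purely for illustration:

import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute name -> providing module


def __getattr__(name):
    # Only triggered for attributes not found through normal module lookup.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")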
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_snake_case : Union[str, Any] = ["small", "medium", "large"]
_snake_case : List[Any] = "lm_head.decoder.weight"
_snake_case : Optional[Any] = "lm_head.weight"
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Tuple = torch.load(__lowerCamelCase )
__snake_case : Dict = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
_snake_case : Any = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_snake_case : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
_snake_case : List[str] = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 134 | 0 |
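The whole conversion above is a single key rename inside a checkpoint dict; a round-trip check on a toy state dict, no real DialoGPT weights needed:

import torch

d = {"lm_head.decoder.weight": torch.zeros(2, 2), "transformer.wte.weight": torch.ones(1)}
d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in d
assert d["lm_head.weight"].shape == (2, 2)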
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Tuple=99 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=36 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Tuple=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Dict=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : List[Any]=0.0_2 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : List[str]=None , ) -> List[str]:
_a : int = parent
_a : List[Any] = batch_size
_a : int = seq_length
_a : str = is_training
_a : Optional[int] = use_input_mask
_a : Optional[Any] = use_token_type_ids
_a : Tuple = use_labels
_a : Dict = vocab_size
_a : List[Any] = embedding_size
_a : int = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Union[str, Any] = num_hidden_groups
_a : int = num_attention_heads
_a : List[Any] = intermediate_size
_a : List[str] = hidden_act
_a : List[Any] = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Any = max_position_embeddings
_a : List[str] = type_vocab_size
_a : Dict = type_sequence_label_size
_a : List[str] = initializer_range
_a : Union[str, Any] = num_labels
_a : Union[str, Any] = num_choices
_a : Tuple = scope
def _lowercase ( self : List[str] ) -> Optional[int]:
_a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Optional[Any] = None
if self.use_input_mask:
_a : int = random_attention_mask([self.batch_size, self.seq_length] )
_a : str = None
if self.use_token_type_ids:
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Optional[int] = None
_a : List[str] = None
_a : Tuple = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_a : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : List[str] ) -> int:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any ) -> Union[str, Any]:
_a : Dict = AlbertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
_a : List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
_a : int = model(__lowercase , token_type_ids=__lowercase )
_a : Dict = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ) -> Tuple:
_a : Optional[int] = AlbertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
_a : List[Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , sentence_order_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ) -> Tuple:
_a : Dict = AlbertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
_a : Dict = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] ) -> Union[str, Any]:
_a : List[str] = AlbertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
_a : Optional[int] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ) -> Any:
_a : Optional[int] = self.num_labels
_a : Optional[Any] = AlbertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
_a : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int ) -> List[Any]:
_a : Any = self.num_labels
_a : Any = AlbertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
_a : Any = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> List[Any]:
_a : Any = self.num_choices
_a : Tuple = AlbertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
_a : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[int] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Tuple ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCamelCase : str = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase : Tuple = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : Dict = True
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> List[Any]:
_a : str = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
_a : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
_a : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def _lowercase ( self : Tuple ) -> Dict:
_a : str = AlbertModelTester(self )
_a : List[str] = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def _lowercase ( self : List[Any] ) -> Tuple:
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[Any] ) -> int:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowercase )
def _lowercase ( self : int ) -> List[str]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def _lowercase ( self : List[Any] ) -> List[str]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
def _lowercase ( self : Tuple ) -> Any:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def _lowercase ( self : Tuple ) -> str:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : int = type
self.model_tester.create_and_check_model(*__lowercase )
@slow
def _lowercase ( self : int ) -> List[Any]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[Any] = AlbertModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self : List[Any] ) -> Union[str, Any]:
        model = AlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 294 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
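# Helper used by the tests below: it collects (row_id, row_dict) pairs partition by
# partition, in the given order, matching what the Spark examples iterable should yield.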
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ) -> Tuple:
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed( ) -> Tuple:
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples( ) -> Optional[int]:
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order ) # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable( ) -> Any:
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f"""0_{i}"""
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle( ) -> Dict:
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard( ) -> List[Any]:
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows( ) -> Dict:
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 230 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs )->None:
        '''simple docstring'''
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 65 |
def _SCREAMING_SNAKE_CASE ( equation1 , equation2 ):
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
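# A quick sanity check (hypothetical usage of the solver above): the system
# x + 2y = 3 and 2x + y = 3 has determinant -3 and solution (1.0, 1.0).
#
#     assert _SCREAMING_SNAKE_CASE([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)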
| 65 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''transformers''', '''torch''', '''note_seq''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["transformers", "torch", "note_seq"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 63 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ : List[Any] =None
lowerCAmelCase__ : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : int ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : int ={
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
lowerCAmelCase__ : Dict =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ):
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
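    # In legacy mode the language code is emitted as a suffix ([..., </s>, lang_code]);
    # the current default emits it as a prefix ([lang_code, ..., </s>]).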
    def set_src_lang_special_tokens( self , src_lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 257 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 64 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "decord" )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos: Union[str, List[str]] , **kwargs ):
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://" ) or video.startswith("https://" ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
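        # np.linspace over [start_idx, end_idx] yields `num_frames` evenly spaced
        # frame indices, i.e. roughly one frame every `frame_sampling_rate` frames.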
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 64 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _lowerCAmelCase ( ctypes.Structure ):
"""simple docstring"""
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor( ) -> None:
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
sys.stdout.write("\033[?25l")
sys.stdout.flush()
def show_cursor( ) -> None:
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
sys.stdout.write("\033[?25h")
sys.stdout.flush()
@contextmanager
def hide_cursor_context( ):
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
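# Minimal usage sketch (`hide_cursor_context` is the name given above to the
# obfuscated context manager): the cursor stays hidden for the duration of the
# block and is restored even if an exception is raised.
#
#     with hide_cursor_context():
#         run_long_task()  # hypothetical long-running function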
| 17 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    pixel_values = transform(image ).unsqueeze(0 )
    processor_pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(pixel_values , processor_pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(F"ybelkada/{vit_name}" )
        processor.push_to_hub(F"ybelkada/{vit_name}" )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 237 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray ):
    '''simple docstring'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ):
    '''simple docstring'''
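    # Rayleigh quotient R(M, v) = (v* M v) / (v* v); it is real-valued whenever M is Hermitian.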
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests():
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 356 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ["ConditionalDetrFeatureExtractor"]
_lowerCamelCase : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
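# Usage sketch (assuming this file lives at transformers/models/conditional_detr/__init__.py,
# which is where the original module resides): attribute access on the lazy module
# triggers the real import, so
#     from transformers.models.conditional_detr import ConditionalDetrModel
# only pays the torch import cost when the class is actually requested.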
| 159 | 0 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                '''The config can simply be saved and uploaded again to be fixed.''')
class BartOnnxConfig( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ])
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ])
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F'past_key_values.{i}.key'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F'past_key_values.{i}.value'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ])
        return common_inputs
@property
    def outputs( self ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F'present.{i}.key'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F'present.{i}.value'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework)
        decoder_inputs = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch , encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
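            # Each past key/value tensor has shape
            # (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).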
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch , decoder_past_length)] , dim=1)
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers)
            max_num_layers = max(num_encoder_layers , num_decoder_layers) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers , max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch , seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast , self)._flatten_past_key_values_(
                flattened_output , name , idx , t)
| 91 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data ) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b] , data[a]
    return data
if __name__ == "__main__":
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 91 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class ClapFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ['input_features', 'is_longer']
    def __init__( self , feature_size=64 , sampling_rate=48000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min: float = 0 , frequency_max: float = 14000 , top_db: int = None , truncation: str = "fusion" , padding: str = "repeatpad" , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='htk' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None ):
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='dB' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
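        # Split the valid chunk start positions into three ranges so one chunk can
        # be sampled from the front, the middle and the back of the clip.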
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel , size=[chunk_frames, 64] , mode='bilinear' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : np.array ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ,_UpperCAmelCase : str ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_a : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_a : int = len(_UpperCAmelCase ) - max_length
_a : Any = np.random.randint(0 ,overflow + 1 )
_a : Optional[Any] = waveform[idx : idx + max_length]
_a : int = self._np_extract_fbank_features(_UpperCAmelCase ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_a : Any = self._np_extract_fbank_features(_UpperCAmelCase ,self.mel_filters )
_a : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_a : Any = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_a : Any = np.stack([mel, mel, mel, mel] ,axis=0 )
_a : Dict = False
else:
_a : Tuple = self._random_mel_fusion(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
_a : int = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_a : List[str] = False
        # "repeat" is an extra padding mode: tile the audio first, then apply the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_a : str = int(max_length / len(_UpperCAmelCase ) )
_a : str = np.stack(np.tile(_UpperCAmelCase ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_a : int = int(max_length / len(_UpperCAmelCase ) )
_a : Optional[Any] = np.stack(np.tile(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Union[str, Any] = np.pad(_UpperCAmelCase ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 )
if truncation == "fusion":
_a : str = self._np_extract_fbank_features(_UpperCAmelCase ,self.mel_filters )
_a : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
_a : Union[str, Any] = self._np_extract_fbank_features(_UpperCAmelCase ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : str ,_UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCAmelCase : str = None ,_UpperCAmelCase : Optional[str] = None ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[Union[str, TensorType]] = None ,**_UpperCAmelCase : Tuple ,):
_a : Dict = truncation if truncation is not None else self.truncation
_a : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_a : Tuple = isinstance(_UpperCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_a : Any = is_batched_numpy or (
isinstance(_UpperCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
_a : Dict = [np.asarray(_UpperCAmelCase ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase ,np.ndarray ):
_a : List[Any] = np.asarray(_UpperCAmelCase ,dtype=np.floataa )
elif isinstance(_UpperCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_a : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_a : int = [np.asarray(_UpperCAmelCase )]
# convert to mel spectrogram, truncate and pad if needed.
_a : Optional[int] = [
self._get_input_mel(_UpperCAmelCase ,max_length if max_length else self.nb_max_samples ,_UpperCAmelCase ,_UpperCAmelCase )
for waveform in raw_speech
]
_a : Dict = []
_a : str = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCAmelCase )
is_longer.append(_UpperCAmelCase )
if truncation == "fusion" and sum(_UpperCAmelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_a : str = np.random.randint(0 ,len(_UpperCAmelCase ) )
_a : Optional[int] = True
if isinstance(input_mel[0] ,_UpperCAmelCase ):
_a : int = [np.asarray(_UpperCAmelCase ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_a : Union[str, Any] = [[longer] for longer in is_longer]
_a : str = {'input_features': input_mel, 'is_longer': is_longer}
_a : str = BatchFeature(_UpperCAmelCase )
if return_tensors is not None:
_a : str = input_features.convert_to_tensors(_UpperCAmelCase )
return input_features
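# Minimal usage sketch, assuming the fragment above is transformers'
# ClapFeatureExtractor; the checkpoint name is an assumption for illustration.
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapFeatureExtractor

    extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.zeros(48_000 * 12, dtype=np.float32)  # 12 s of mono audio at 48 kHz
    inputs = extractor(audio, sampling_rate=48_000, return_tensors="pt")
    # "input_features" holds the (possibly fused) log-mel batch; "is_longer"
    # flags clips that exceeded max_length and took the fusion path above.
    print(inputs["input_features"].shape, inputs["is_longer"])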
| 107 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-algorithm for the minimum vertex cover problem."""
    queue: list[list] = []
    # for each node and its adjacency list, add them with the node's rank to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq implements a min-heap, so -1 * len(value) is pushed to pop the max-degree node first
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the node has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 107 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class SCREAMING_SNAKE_CASE :
__lowerCamelCase : torch.Tensor # [batch_size x 3]
__lowerCamelCase : torch.Tensor # [batch_size x 3]
__lowerCamelCase : torch.Tensor # [batch_size x 3]
__lowerCamelCase : torch.Tensor # [batch_size x 3]
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : float
__lowerCamelCase : float
__lowerCamelCase : Tuple[int]
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(__lowercase , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = self.shape
__a = int(np.prod(__lowercase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(__lowercase )
__a = rays.view(__lowercase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase_ ( self : int , __lowercase : int ):
'''simple docstring'''
__a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(__lowercase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(__lowercase , -1 , 2 )
__a = (
self.z.view(__lowercase , 1 , 3 )
+ self.x.view(__lowercase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__lowercase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=__lowercase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(__lowercase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__lowercase , *__lowercase , 2 , 3 )
def UpperCamelCase_ ( self : str , __lowercase : str , __lowercase : Optional[int] ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__lowercase , height=__lowercase , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__a = np.array([np.sin(lowerCamelCase__ ), np.cos(lowerCamelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(lowerCamelCase__ ), -np.sin(lowerCamelCase__ ), 0.0] )
__a = np.cross(lowerCamelCase__ , lowerCamelCase__ )
origins.append(lowerCamelCase__ )
xs.append(lowerCamelCase__ )
ys.append(lowerCamelCase__ )
zs.append(lowerCamelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , width=lowerCamelCase__ , height=lowerCamelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCamelCase__ )) , )
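# Minimal usage sketch (an assumption: the factory above mirrors diffusers'
# create_pan_cameras, whose names are obfuscated here). Called with size = 64 it
# builds 20 poses circling the origin; the ray property on the returned camera
# yields one (origin, direction) pair per pixel, a tensor of shape
# [1, 20 * 64 * 64, 2, 3].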
| 302 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :Tuple = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Tuple = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = [sequences]
A_ : List[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : str = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Optional[int] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer does not support padding, which is necessary for zero-shot classification; """
                """attempting to use `pad_token=eos_token`""" )
A_ : Any = self.tokenizer.eos_token
try:
A_ : Optional[Any] = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
                # tokenizers may raise when asked to truncate to a length the
                # input never reaches. In that case we do not want to truncate,
                # and there is no cleaner way to detect the condition than
                # catching the exception.
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Union[str, Any] = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Tuple = {}
if "candidate_labels" in kwargs:
A_ : Optional[int] = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : Union[str, Any] = {}
if "multi_label" in kwargs:
A_ : Optional[int] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Any = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : Any = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Dict = inputs["""candidate_label"""]
A_ : Any = inputs["""sequence"""]
A_ : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : Optional[int] = self.model(**lowercase )
A_ : Optional[Any] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Tuple = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : Optional[int] = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Union[str, Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : List[str] = logits.shape[0]
A_ : Optional[int] = len(lowercase )
A_ : int = N // n
A_ : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Dict = self.entailment_id
A_ : Dict = -1 if entailment_id == 0 else 0
A_ : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Optional[Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Tuple = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : Any = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : List[str] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
        }
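# Minimal usage sketch (the model name is an assumption: the commonly used
# zero-shot default checkpoint):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "one day I will see the world",
        candidate_labels=["travel", "cooking", "dancing"],
    )
    print(result["labels"][0], result["scores"][0])  # top label and its score
| 206 | 0 |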
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_snake_case = 250_004
_snake_case = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : List[Any] = MBartTokenizer
UpperCamelCase : str = MBartTokenizerFast
UpperCamelCase : Dict = True
UpperCamelCase : Optional[int] = True
def _lowercase ( self : Tuple ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_a : Tuple = MBartTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : int ) -> Optional[Any]:
_a : Union[str, Any] = MBartTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
_a : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_a : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_a : Tuple = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase ( self : int ) -> str:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_a : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Dict = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_a : str = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[int] = tempfile.mkdtemp()
_a : str = tokenizer_r.save_pretrained(UpperCAmelCase__ )
_a : str = tokenizer_p.save_pretrained(UpperCAmelCase__ )
                # Checks it saves with the same files plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
_a : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
_a : str = tokenizer_r.from_pretrained(UpperCAmelCase__ )
_a : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_a : Union[str, Any] = tempfile.mkdtemp()
_a : List[Any] = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
_a : List[str] = tokenizer_p.save_pretrained(UpperCAmelCase__ )
                # Checks it saves with the same files
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
_a : Dict = tokenizer_r.from_pretrained(UpperCAmelCase__ )
_a : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_a : List[Any] = tempfile.mkdtemp()
_a : Optional[int] = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
_a : List[str] = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_a : Any = tokenizer_r.from_pretrained(UpperCAmelCase__ )
_a : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
UpperCamelCase : Tuple = '''facebook/mbart-large-en-ro'''
UpperCamelCase : Optional[Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
UpperCamelCase : Dict = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
UpperCamelCase : Union[str, Any] = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def _lowercase ( cls : int ) -> Tuple:
_a : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
_a : Optional[Any] = 1
return cls
def _lowercase ( self : int ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
_a : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> Dict:
self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
_a : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_a : Any = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
_a : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )
def _lowercase ( self : Tuple ) -> str:
_a : Dict = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , UpperCAmelCase__ )
_a : Any = 10
_a : Tuple = self.tokenizer(UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCAmelCase__ )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase ( self : Optional[int] ) -> List[str]:
_a : Any = tempfile.mkdtemp()
_a : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase__ )
_a : Any = MBartTokenizer.from_pretrained(UpperCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase__ )
@require_torch
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
_a : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors="""pt""" )
_a : List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
_a : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_a : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase ( self : Dict ) -> Dict:
_a : Optional[int] = self.tokenizer(self.src_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=3 , return_tensors="""pt""" )
_a : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=10 , return_tensors="""pt""" )
_a : str = targets["""input_ids"""]
_a : List[str] = shift_tokens_right(UpperCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
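# Minimal usage sketch with the real MBartTokenizer API the tests above exercise,
# using the same checkpoint and language pair.
if __name__ == "__main__":
    from transformers import MBartTokenizer

    tok = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok(
        ["UN Chief Says There Is No Military Solution in Syria"],
        text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
        return_tensors="pt",
    )
    print(batch["input_ids"][0][-2:])  # source ids end with </s> then the en_XX code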
| 324 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline
UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
_a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_a : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
_a : Tuple = CLIPTextModel(UpperCAmelCase__ )
_a : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Dict = CLIPTextModelWithProjection(UpperCAmelCase__ )
_a : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=0 ) -> int:
_a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
_a : Any = image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith("""mps""" ):
_a : Any = torch.manual_seed(UpperCAmelCase__ )
else:
_a : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def _lowercase ( self : Any ) -> List[Any]:
_a : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : List[Any] = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
_a : Union[str, Any] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = sd_pipe(**UpperCAmelCase__ ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[str] = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Any ) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _lowercase ( self : Any ) -> Any:
pass
def _lowercase ( self : Tuple ) -> Union[str, Any]:
_a : int = self.get_dummy_components()
_a : Any = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
_a : Dict = sd_pipe.to(UpperCAmelCase__ )
_a : List[str] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
# forward without prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = 3 * ["""this is a negative prompt"""]
_a : Dict = negative_prompt
_a : Dict = 3 * [inputs["""prompt"""]]
_a : Optional[Any] = sd_pipe(**UpperCAmelCase__ )
_a : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : Union[str, Any] = 3 * ["""this is a negative prompt"""]
_a : int = 3 * [inputs.pop("""prompt""" )]
        _a , _a , _a , _a = sd_pipe.encode_prompt(UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )  # prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
_a : Tuple = sd_pipe(
**UpperCAmelCase__ , prompt_embeds=UpperCAmelCase__ , negative_prompt_embeds=UpperCAmelCase__ , pooled_prompt_embeds=UpperCAmelCase__ , negative_pooled_prompt_embeds=UpperCAmelCase__ , )
_a : Dict = output.images[0, -3:, -3:, -1]
        # make sure the two runs produce identical image slices
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str="cpu" , UpperCAmelCase__ : str=torch.floataa , UpperCAmelCase__ : List[Any]=0 ) -> List[str]:
_a : List[str] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Union[str, Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_a : List[Any] = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
_a : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self : int ) -> Union[str, Any]:
_a : Union[str, Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_inputs(UpperCAmelCase__ )
_a : Tuple = pipe(**UpperCAmelCase__ ).images
_a : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
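# Minimal usage sketch for the public img2img entry point these tests exercise.
# The refiner checkpoint and image URL are assumptions chosen for illustration.
#
#     import torch
#     from diffusers import StableDiffusionXLImg2ImgPipeline
#     from diffusers.utils import load_image
#
#     pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
#     ).to("cuda")
#     init_image = load_image("https://example.com/init.png")  # placeholder URL
#     image = pipe("a photo of an astronaut", image=init_image, strength=0.75).images[0]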
| 324 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalised escape step of (x, y): 1.0 means no divergence within max_step."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: black inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Hue-based coloring: black inside the set, otherwise an RGB color keyed to the distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set to a PIL image by mapping pixel coordinates to figure coordinates."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
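    # Quick sanity checks, hand-computed from the definitions above:
    # get_distance(0.0, 0.0, 50) == 1.0 -> the origin never diverges, so it is black
    # get_distance(1.0, 1.0, 50) == 0.0 -> (1, 1) escapes on the first step and gets a hue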
| 231 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
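# Minimal usage sketch; checkpoint and image URL follow the usual documentation
# example and are assumptions here.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)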
| 298 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given length by filling digit pairs from the outside in."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result
def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
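    # Reference values for checking (Project Euler 145):
    # solution(3) == 120       # reversible numbers below 10**3, as stated in the problem
    # solution(9) == 608_720   # the widely quoted answer for numbers below 10**9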
| 368 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert an energy value between two of the units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
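# Worked examples; the values follow directly from the joule-based table above:
#   energy_conversion("joule", "kilojoule", 1_000) == 1.0
#   energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
#   energy_conversion("calorie_nutr", "kilojoule", 1) == 4.1868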
| 348 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Dict = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_UpperCAmelCase : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 | 1 |
from math import factorial
def combinations(n: int, k: int) -> int:
    '''Return the binomial coefficient n choose k, i.e. n! / (k! * (n - k)!).'''
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
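# Expected values (standard binomial coefficients): C(52, 5) = 2_598_960,
# C(40, 4) = 91_390 and C(10, 3) = 120, matching the three prints above.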
| 363 |
import random
def _partition(data: list, pivot) -> tuple:
    '''Three-way partition of data around pivot: returns (less, equal, greater).'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    '''Return the index-th smallest element of items (0-indexed) via randomised quickselect.'''
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    less, equal, greater = _partition(items, pivot)
    m = len(less)
    count = len(equal)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
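# Worked example (hand-traced): quick_select([7, 10, 4, 3, 20, 15], 2) returns 7,
# the 0-indexed 2nd smallest element (sorted order: [3, 4, 7, 10, 15, 20]).
# The random pivot gives an expected O(n) running time, and out-of-range indices
# return None via the guard at the top of quick_select.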
| 282 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : List[str] = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : List[str] = "convbert"
    def __init__( self : int , A : Optional[int]=30_522 , A : Dict=768 , A : Dict=12 , A : Optional[int]=12 , A : Any=3_072 , A : Union[str, Any]="gelu" , A : Any=0.1 , A : Dict=0.1 , A : int=512 , A : int=2 , A : int=0.02 , A : Tuple=1e-12 , A : int=1 , A : int=0 , A : str=2 , A : Optional[int]=768 , A : List[Any]=2 , A : Optional[int]=9 , A : Optional[int]=1 , A : str=None , **A : List[Any] , ) -> Optional[Any]:
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , **A , )
lowercase_ : List[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : List[Any] = max_position_embeddings
lowercase_ : Any = type_vocab_size
lowercase_ : Optional[int] = initializer_range
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : Dict = embedding_size
lowercase_ : Any = head_ratio
lowercase_ : Union[str, Any] = conv_kernel_size
lowercase_ : int = num_groups
lowercase_ : Dict = classifier_dropout
class _UpperCAmelCase ( _A ):
@property
def A ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase_ : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase_ : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
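# Minimal usage sketch: instantiating the real config/model pair builds a
# randomly initialised ConvBERT from the defaults above.
if __name__ == "__main__":
    from transformers import ConvBertConfig, ConvBertModel

    config = ConvBertConfig(num_hidden_layers=2)  # smaller than the 12-layer default
    model = ConvBertModel(config)
    print(config.conv_kernel_size, sum(p.numel() for p in model.parameters()))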
| 33 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Velocities are given in m/s, so values below 1 m/s are rejected as degenerate input
        raise ValueError("Speed must be greater than or equal to 1!")
return velocity / c
def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
def transform(velocity: float, event=None) -> np.ndarray:
    # Use the generic symbolic four-vector when no explicit event is given
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("""Example of four vector: """)
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 128 | 0 |
'''simple docstring'''
def perfect(number: int) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
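    # Quick checks: perfect(6) and perfect(28) are True (6 = 1 + 2 + 3,
    # 28 = 1 + 2 + 4 + 7 + 14), while perfect(12) is False (1 + 2 + 3 + 4 + 6 = 16).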
| 322 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs]
_UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : Optional[Any] = logits.shape[0]
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : str = N // n
_UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
_UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
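if __name__ == "__main__":
    # Minimal sketch (added) of the multi_label postprocessing math above,
    # outside the pipeline class: a row-wise softmax over the
    # [contradiction, entailment] logits, keeping the entailment column as
    # the independent per-label score.
    demo_logits = np.array([[[2.0, 0.5], [0.1, 1.9]]])  # (num_sequences, num_labels, 2)
    demo_probs = np.exp(demo_logits) / np.exp(demo_logits).sum(-1, keepdims=True)
    print(demo_probs[..., 1])  # ~[[0.182, 0.858]]: entailment probability per label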
| 322 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
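# Illustrative usage (added): the menu is interactive, so this only makes
# sense in a real terminal session and is left as a comment:
#
#     menu = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"])
#     chosen_index = menu.run(default_choice=0)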
| 125 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67_45_23_01, 0xEF_CD_AB_89, 0x98_BA_DC_FE, 0x10_32_54_76, 0xC3_D2_E1_F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFF_FF_FF_FF

    def padding(self):
        # Pad to a multiple of 64 bytes: a 0x80 byte, zero bytes, then the
        # 8-byte big-endian bit length of the original message.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA_62_C1_D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFF_FF_FF_FF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFF_FF_FF_FF,
                self.h[1] + b & 0xFF_FF_FF_FF,
                self.h[2] + c & 0xFF_FF_FF_FF,
                self.h[3] + d & 0xFF_FF_FF_FF,
                self.h[4] + e & 0xFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
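    # Extra illustrative check (added): the pure-Python digest should agree
    # with hashlib on arbitrary inputs.
    for msg in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
        assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324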
| 245 | 0 |
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
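    # Typical invocations (added, illustrative) of the subcommand this module
    # backs once it is wired into the `accelerate` CLI:
    #   accelerate test
    #   accelerate test --config_file path/to/default_config.yaml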
| 355 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Dict = seq_length
__snake_case : List[str] = is_training
__snake_case : List[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : str = vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = scope
__snake_case : Any = range_bbox
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[str] = bbox[i, j, 3]
__snake_case : Any = bbox[i, j, 1]
__snake_case : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : List[str] = bbox[i, j, 2]
__snake_case : Union[str, Any] = bbox[i, j, 0]
__snake_case : Dict = t
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[str] = None
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = LiltModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ )
__snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ )
__snake_case : List[str] = model(a_ , bbox=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : List[str] = LiltForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(
a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCamelCase__ =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Dict = type
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
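        # Vectorized sketch (added) of the bbox-legalization loop in
        # LiltModelTester.prepare_config_and_inputs above: sort each (x0, x2)
        # and (x1, x3) coordinate pair so every box is well-formed, without
        # Python-level loops.
        raw_bbox = torch.randint(0, 1000, (2, 4, 4))
        xs = torch.sort(raw_bbox[..., [0, 2]], dim=-1).values
        ys = torch.sort(raw_bbox[..., [1, 3]], dim=-1).values
        legal_bbox = torch.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], dim=-1)
        assert (legal_bbox[..., 2] >= legal_bbox[..., 0]).all()
        assert (legal_bbox[..., 3] >= legal_bbox[..., 1]).all()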
| 24 | 0 |
def upper(word: str) -> str:
    return "".join(chr(ord(char) - 32) if 'a' <= char <= 'z' else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
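    # Illustrative check (added): ASCII lowercase letters sit exactly 32 code
    # points above their uppercase counterparts, hence the ord(char) - 32.
    assert upper("hello WORLD 123") == "HELLO WORLD 123"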
| 278 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowercase__ : List[Any] = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.')
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext', help='Name of the training. Explore datasets at: hf.co/datasets.', )
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.')
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.', )
    parser.add_argument(
        '--shard_size', type=int, default=1000, help='Number of entries to go in a single shard.', )
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit', default=None, type=int, help='Limit the number of shards (used for debugging).', )
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.', )
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str, help='Output directory where the TFRecord shards will be saved. If the'
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        ' shards will be directly saved to a Google Cloud Storage bucket.', )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples['text'])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", 'w') as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
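    # Minimal sketch (added) of the group_texts chunking above, on plain
    # lists: concatenate every token sequence, drop the remainder, then cut
    # fixed-size blocks.
    examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
    concatenated = sum(examples["input_ids"], [])
    total = (len(concatenated) // 4) * 4
    assert [concatenated[i : i + 4] for i in range(0, total, 4)] == [[1, 2, 3, 4], [5, 6, 7, 8]]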
| 224 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__A : Dict = False
__A : Optional[Any] = logging.get_logger(__name__)
__A : Tuple = '''ybelkada/fonts'''
def _check_torch_version():
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase = 36, _UpperCAmelCase = "black", _UpperCAmelCase = "white", _UpperCAmelCase = 5, _UpperCAmelCase = 5, _UpperCAmelCase = 5, _UpperCAmelCase = 5, _UpperCAmelCase = None, _UpperCAmelCase = None, ) -> Image.Image:
'''simple docstring'''
requires_backends(_UpperCAmelCase, 'vision' )
# Add new lines so that each line is no more than 80 characters.
lowerCAmelCase : int = textwrap.TextWrapper(width=80 )
lowerCAmelCase : Any = wrapper.wrap(text=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = '\n'.join(_UpperCAmelCase )
if font_bytes is not None and font_path is None:
lowerCAmelCase : int = io.BytesIO(_UpperCAmelCase )
elif font_path is not None:
lowerCAmelCase : List[Any] = font_path
else:
lowerCAmelCase : str = hf_hub_download(_UpperCAmelCase, 'Arial.TTF' )
lowerCAmelCase : List[Any] = ImageFont.truetype(_UpperCAmelCase, encoding='UTF-8', size=_UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowerCAmelCase : str = ImageDraw.Draw(Image.new('RGB', (1, 1), _UpperCAmelCase ) )
lowerCAmelCase : Dict = temp_draw.textbbox((0, 0), _UpperCAmelCase, _UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
lowerCAmelCase : List[str] = text_width + left_padding + right_padding
lowerCAmelCase : Dict = text_height + top_padding + bottom_padding
lowerCAmelCase : Dict = Image.new('RGB', (image_width, image_height), _UpperCAmelCase )
lowerCAmelCase : Optional[int] = ImageDraw.Draw(_UpperCAmelCase )
draw.text(xy=(left_padding, top_padding), text=_UpperCAmelCase, fill=_UpperCAmelCase, font=_UpperCAmelCase )
return image
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, 'vision' )
# Convert to PIL image if necessary
lowerCAmelCase : List[Any] = to_pil_image(_UpperCAmelCase )
lowerCAmelCase : int = render_text(_UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : List[str] = max(header_image.width, image.width )
lowerCAmelCase : str = int(image.height * (new_width / image.width) )
lowerCAmelCase : Tuple = int(header_image.height * (new_width / header_image.width) )
lowerCAmelCase : str = Image.new('RGB', (new_width, new_height + new_header_height), 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
# Convert back to the original framework if necessary
lowerCAmelCase : List[str] = to_numpy_array(_UpperCAmelCase )
if infer_channel_dimension_format(_UpperCAmelCase ) == ChannelDimension.LAST:
lowerCAmelCase : Tuple = to_channel_dimension_format(_UpperCAmelCase, ChannelDimension.LAST )
return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]
    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : dict , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
lowerCAmelCase : Dict = to_channel_dimension_format(UpperCAmelCase_ , ChannelDimension.FIRST )
lowerCAmelCase : Optional[Any] = torch.from_numpy(UpperCAmelCase_ )
        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(UpperCAmelCase_)
# maximize scale s.t.
lowerCAmelCase : str = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowerCAmelCase : List[str] = max(min(math.floor(scale * image_height / patch_height ) , UpperCAmelCase_ ) , 1 )
lowerCAmelCase : int = max(min(math.floor(scale * image_width / patch_width ) , UpperCAmelCase_ ) , 1 )
lowerCAmelCase : Dict = max(num_feasible_rows * patch_height , 1 )
lowerCAmelCase : str = max(num_feasible_cols * patch_width , 1 )
lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=UpperCAmelCase_ , antialias=UpperCAmelCase_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowerCAmelCase : Any = torch_extract_patches(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowerCAmelCase : Tuple = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowerCAmelCase : Dict = torch.arange(UpperCAmelCase_ ).reshape([rows, 1] ).repeat(1 , UpperCAmelCase_ ).reshape([rows * columns, 1] )
lowerCAmelCase : Optional[Any] = torch.arange(UpperCAmelCase_ ).reshape([1, columns] ).repeat(UpperCAmelCase_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowerCAmelCase : List[Any] = row_ids.to(torch.floataa )
lowerCAmelCase : Optional[int] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase : Optional[Any] = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase : str = torch.nn.functional.pad(UpperCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowerCAmelCase : Dict = to_numpy_array(UpperCAmelCase_ )
return result
def lowercase__ ( self : List[str] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] ):
if image.dtype == np.uinta:
lowerCAmelCase : List[str] = image.astype(np.floataa )
# take mean across the whole `image`
lowerCAmelCase : Union[str, Any] = np.mean(UpperCAmelCase_ )
lowerCAmelCase : Dict = np.std(UpperCAmelCase_ )
lowerCAmelCase : List[str] = max(UpperCAmelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , **UpperCAmelCase_ )
def lowercase__ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : int , ):
lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase : Any = patch_size if patch_size is not None else self.patch_size
lowerCAmelCase : Union[str, Any] = max_patches if max_patches is not None else self.max_patches
lowerCAmelCase : Union[str, Any] = self.is_vqa
        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input, as the outputs are always flattened patches.')
lowerCAmelCase : Optional[Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase : Optional[int] = [convert_to_rgb(UpperCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase : str = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
lowerCAmelCase : Tuple = kwargs.pop('font_bytes' , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = kwargs.pop('font_path' , UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Dict = [header_text] * len(UpperCAmelCase_ )
lowerCAmelCase : List[str] = [
render_header(UpperCAmelCase_ , header_text[i] , font_bytes=UpperCAmelCase_ , font_path=UpperCAmelCase_ )
for i, image in enumerate(UpperCAmelCase_ )
]
if do_normalize:
lowerCAmelCase : Any = [self.normalize(image=UpperCAmelCase_ ) for image in images]
# convert to torch tensor and permute
lowerCAmelCase : List[Any] = [
self.extract_flattened_patches(image=UpperCAmelCase_ , max_patches=UpperCAmelCase_ , patch_size=UpperCAmelCase_ )
for image in images
]
# create attention mask in numpy
lowerCAmelCase : Dict = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowerCAmelCase : Dict = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=UpperCAmelCase_ )
return encoded_outputs
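if __name__ == "__main__":
    # Worked example (added) of the patch-budget math in
    # extract_flattened_patches above: pick a scale so that at most
    # max_patches 16x16 patches tile the resized image.
    patch_h = patch_w = 16
    image_h, image_w = 480, 640
    max_patches = 1024
    scale = math.sqrt(max_patches * (patch_h / image_h) * (patch_w / image_w))
    rows = max(min(math.floor(scale * image_h / patch_h), max_patches), 1)
    cols = max(min(math.floor(scale * image_w / patch_w), max_patches), 1)
    print(rows, cols, rows * cols)  # 27 36 972 (<= 1024)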
| 355 |
import math
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
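    # Worked check (added): for n = 10, the sum of squares is 385 and the
    # square of the sum is 55**2 = 3025, so the difference is 2640.
    assert solution(10) == 3025 - 385 == 2640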
| 323 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A : Optional[Any] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str = PILImageResampling.BICUBIC , __lowerCAmelCase : Optional[Any] = None , **__lowerCAmelCase : str , ) -> np.ndarray:
"""simple docstring"""
A__ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A__ = size['''shortest_edge''']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A__ = int(shortest_edge / crop_pct )
A__ = get_resize_output_image_size(snake_case__ , size=snake_case__ , default_to_square=snake_case__ )
A__ = resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=snake_case__ , size=(shortest_edge, shortest_edge) , data_format=snake_case__ , **snake_case__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
snake_case__ , size=(shortest_edge, shortest_edge) , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : int = None , **__lowerCAmelCase : List[Any] , ) -> int:
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] = None , __lowerCAmelCase : Optional[Any] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Dict = None , __lowerCAmelCase : Optional[Any] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Any = None , __lowerCAmelCase : Optional[Any] = ChannelDimension.FIRST , **__lowerCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = crop_pct if crop_pct is not None else self.crop_pct
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
A__ = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
A__ = [self.resize(image=snake_case__ , size=snake_case__ , crop_pct=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
A__ = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
A__ = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
A__ = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
A__ = {'''pixel_values''': images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
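if __name__ == "__main__":
    # Worked example (added) of the crop_pct resize rule above: for a target
    # shortest edge of 224 (< 384) with crop_pct = 224/256, the image is first
    # resized so its shortest edge is int(224 / (224/256)) = 256, then
    # center-cropped to 224x224; at 384 and above it is warped directly.
    shortest_edge, crop_pct = 224, 224 / 256
    assert int(shortest_edge / crop_pct) == 256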
| 274 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
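    # Illustrative run (added): estimating pi from the quarter-circle area
    # y = sqrt(4 - x^2) on [0, 2]; the result is stochastic, typically within
    # a few hundredths of math.pi for 10_000 samples.
    estimate = area_under_curve_estimator(10_000, lambda x: sqrt(4.0 - x * x), 0.0, 2.0)
    print(abs(estimate - pi))  # usually small, but this is a random quantity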
| 348 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 354 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
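    # Worked example (added; silicon-like magnitudes assumed purely for
    # illustration): Nd = Na = 1e17, ni = 1.5e10 gives
    # V_bi = kT/q * ln(Nd * Na / ni**2) ≈ 0.0259 * ln(4.44e13) ≈ 0.81 V at 300 K.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))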
| 99 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A :
def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict=13 , lowercase_ : List[Any]=7 , lowercase_ : List[Any]=True , lowercase_ : Any=True , lowercase_ : Optional[int]=False , lowercase_ : Tuple=True , lowercase_ : List[str]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[Any]=5 , lowercase_ : Dict=4 , lowercase_ : List[Any]=37 , lowercase_ : Any="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Any=16 , lowercase_ : Optional[int]=2 , lowercase_ : Any=0.02 , lowercase_ : Dict=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : Tuple=None , ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] =parent
_lowerCamelCase : Tuple =batch_size
_lowerCamelCase : Any =seq_length
_lowerCamelCase : int =is_training
_lowerCamelCase : int =use_input_mask
_lowerCamelCase : List[str] =use_token_type_ids
_lowerCamelCase : Dict =use_labels
_lowerCamelCase : int =vocab_size
_lowerCamelCase : Optional[Any] =hidden_size
_lowerCamelCase : Union[str, Any] =num_hidden_layers
_lowerCamelCase : Any =num_attention_heads
_lowerCamelCase : Tuple =intermediate_size
_lowerCamelCase : List[str] =hidden_act
_lowerCamelCase : int =hidden_dropout_prob
_lowerCamelCase : Optional[int] =attention_probs_dropout_prob
_lowerCamelCase : Any =max_position_embeddings
_lowerCamelCase : Optional[Any] =type_vocab_size
_lowerCamelCase : List[Any] =type_sequence_label_size
_lowerCamelCase : Union[str, Any] =initializer_range
_lowerCamelCase : Dict =num_labels
_lowerCamelCase : Optional[Any] =num_choices
_lowerCamelCase : Dict =scope
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_lowerCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Union[str, Any] =None
if self.use_input_mask:
_lowerCamelCase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : str =None
if self.use_token_type_ids:
_lowerCamelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Tuple =None
_lowerCamelCase : Optional[Any] =None
_lowerCamelCase : Optional[Any] =None
if self.use_labels:
_lowerCamelCase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Optional[Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def lowerCamelCase ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Dict =LlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCamelCase : List[str] =model(lowercase_ , attention_mask=lowercase_ )
_lowerCamelCase : List[str] =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
"""simple docstring"""
config.add_cross_attention =True
model =LlamaModel(config )
model.to(torch_device )
model.eval()
result =model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
result =model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
result =model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_causal_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
"""simple docstring"""
model =LlamaForCausalLM(config=config )
model.to(torch_device )
model.eval()
result =model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_decoder_model_past_large_inputs ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
"""simple docstring"""
config.is_decoder =True
config.add_cross_attention =True
model =LlamaForCausalLM(config=config )
model.to(torch_device )
model.eval()
# first forward pass
outputs =model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
past_key_values =outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens =ids_tensor((self.batch_size, 3) , config.vocab_size )
next_mask =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
next_input_ids =torch.cat([input_ids, next_tokens] , dim=-1 )
next_attention_mask =torch.cat([input_mask, next_mask] , dim=-1 )
output_from_no_past =model(
next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
output_from_past =model(
next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
# select random slice
random_slice_idx =ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice =output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
def prepare_config_and_inputs_for_common ( self ):
"""simple docstring"""
config_and_inputs =self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) =config_and_inputs
inputs_dict ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes =(LlamaForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping =(
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking =False
test_pruning =False
def setUp ( self ):
"""simple docstring"""
self.model_tester =LlamaModelTester(self )
self.config_tester =ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
def test_config ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model ( self ):
"""simple docstring"""
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_various_embeddings ( self ):
"""simple docstring"""
config_and_inputs =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type =type
self.model_tester.create_and_check_model(*config_and_inputs )
def test_llama_sequence_classification_model ( self ):
"""simple docstring"""
config , input_dict =self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels =3
input_ids =input_dict['input_ids']
attention_mask =input_ids.ne(1 ).to(torch_device )
sequence_labels =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
model =LlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result =model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def test_llama_sequence_classification_model_for_single_label ( self ):
"""simple docstring"""
config , input_dict =self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels =3
config.problem_type ='single_label_classification'
input_ids =input_dict['input_ids']
attention_mask =input_ids.ne(1 ).to(torch_device )
sequence_labels =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
model =LlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result =model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def test_llama_sequence_classification_model_for_multi_label ( self ):
"""simple docstring"""
config , input_dict =self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels =3
config.problem_type ='multi_label_classification'
input_ids =input_dict['input_ids']
attention_mask =input_ids.ne(1 ).to(torch_device )
sequence_labels =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
model =LlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result =model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def test_save_load_fast_init_from_base ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def test_model_rope_scaling ( self , scaling_type ):
"""simple docstring"""
config , _ =self.model_tester.prepare_config_and_inputs_for_common()
short_input =ids_tensor([1, 10] , config.vocab_size )
long_input =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
original_model =LlamaModel(config )
original_model.to(torch_device )
original_model.eval()
original_short_output =original_model(short_input ).last_hidden_state
original_long_output =original_model(long_input ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
config.rope_scaling ={'type': scaling_type, 'factor': 10.0}
scaled_model =LlamaModel(config )
scaled_model.to(torch_device )
scaled_model.eval()
scaled_short_output =scaled_model(short_input ).last_hidden_state
scaled_long_output =scaled_model(long_input ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
@require_torch
class A ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_lowerCamelCase : int =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowerCamelCase : int =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Dict =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
_lowerCamelCase : Tuple =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_lowerCamelCase : List[Any] =model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_lowerCamelCase : str =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : List[str] =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : str =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_lowerCamelCase : int =model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_lowerCamelCase : str =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Union[str, Any] =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Any =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_lowerCamelCase : Optional[Any] =model(torch.tensor(lowercase_ ) )
_lowerCamelCase : Optional[int] =torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
_lowerCamelCase : int =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Tuple ='Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_lowerCamelCase : Union[str, Any] ='Simply put, the theory of relativity states that '
_lowerCamelCase : int =LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_lowerCamelCase : str =tokenizer.encode(lowercase_ , return_tensors='pt' )
_lowerCamelCase : List[Any] =LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ )
# greedy generation outputs
_lowerCamelCase : str =model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
_lowerCamelCase : Tuple =tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
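The parameterized scaling test above drives Llama's RoPE-scaling hook through the config. Outside the test harness, the same knob reduces to roughly the sketch below; the tiny dimensions are made up so a random-weight model runs on CPU, and only the `rope_scaling` dict mirrors what the test sets:

```python
# Sketch: a small random-weight Llama model with linear RoPE scaling enabled.
# All sizes here are illustrative, not taken from any released checkpoint.
import torch
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=1000,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    max_position_embeddings=64,
    rope_scaling={"type": "linear", "factor": 10.0},  # same dict shape as in the test
)
model = LlamaModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 10))
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([1, 10, 32])
```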
| 199 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class A ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class =XLMProphetNetTokenizer
test_rust_tokenizer =False
test_sentencepiece =True
def setUp ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer =XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id ( self ):
"""simple docstring"""
token ='[PAD]'
token_id =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab ( self ):
"""simple docstring"""
vocab_keys =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(vocab_keys ) , 1012 )
def test_vocab_size ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def test_full_tokenizer ( self ):
"""simple docstring"""
tokenizer =XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens =tokenizer.tokenize('This is a test' )
self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
tokens =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
ids =tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
back_tokens =tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def big_tokenizer ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def test_tokenization_base_easy_symbols ( self ):
"""simple docstring"""
symbols ='Hello World!'
original_tokenizer_encodings =[3_5389, 6672, 49, 2]
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def test_tokenizer_integration ( self ):
"""simple docstring"""
_lowerCamelCase : Dict ={'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
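For orientation, the encode expectation checked in the first slow test corresponds to roughly this usage — a sketch, since the exact ids come from the released SentencePiece model:

```python
# Sketch: encoding text with the pretrained XLMProphetNet tokenizer.
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tokenizer.encode("Hello World!")
print(ids)                    # [35389, 6672, 49, 2] per the slow test above
print(tokenizer.decode(ids))  # round-trips back to the input text plus special tokens
```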
| 199 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
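From the caller's side this table is invisible: attribute access on the package triggers the deferred import. A hedged usage sketch — the classes are the ones declared above, while the checkpoint name is the commonly published X-CLIP base model and is an assumption here:

```python
# Sketch: consuming the lazily exported X-CLIP classes.
from transformers import XCLIPModel, XCLIPProcessor  # resolved on first access via _LazyModule

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")  # assumed checkpoint
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
print(type(model).__name__)  # XCLIPModel
```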
| 111 |
'''simple docstring'''
def alternative_string_arrange( first_str: str , second_str: str ) -> str:
first_str_length = len(first_str )
second_str_length = len(second_str )
abs_length = (
first_str_length if first_str_length > second_str_length else second_str_length
)
output_list = []
for char_count in range(abs_length ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
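A couple more calls make the interleaving rule concrete — one character from each string per round, with the longer string's tail appended:

```python
# Sketch: expected interleavings, derived by hand from the loop above.
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"
assert alternative_string_arrange("", "XY") == "XY"
```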
| 111 | 1 |
'''simple docstring'''
import re
def dna( dna: str ) -> str:
'''simple docstring'''
if len(re.findall('''[ATCG]''', dna ) ) != len(dna ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
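Usage of the complement function above, including the validation path:

```python
# Sketch: complementing a strand and hitting the validation error.
print(dna("ATCG"))   # TAGC
print(dna("GTAT"))   # CATA
try:
    dna("AXCG")      # "X" is not a valid base
except ValueError as err:
    print(err)       # Invalid Strand
```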
| 56 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = CycleDiffusionPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components ( self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def get_dummy_inputs ( self , device , seed=0 ):
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
image = image / 2 + 0.5
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def test_stable_diffusion_cycle ( self ):
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = CycleDiffusionPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
output = pipe(**inputs )
images = output.images
image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def test_stable_diffusion_cycle_fp16 ( self ):
components = self.get_dummy_components()
for name, module in components.items():
if hasattr(module , '''half''' ):
components[name] = module.half()
pipe = CycleDiffusionPipeline(**components )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(torch_device )
output = pipe(**inputs )
images = output.images
image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def A_ ( self : List[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def tearDown ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Union[str, Any] ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = '''CompVis/stable-diffusion-v1-4'''
snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_ = CycleDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case_ = '''A black colored car'''
snake_case_ = '''A blue colored car'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A_ ( self : List[str] ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = '''CompVis/stable-diffusion-v1-4'''
snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_ = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case_ = '''A black colored car'''
snake_case_ = '''A blue colored car'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
assert np.abs(image - expected_image ).max() < 2e-2
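Stripped of the test harness, the slow tests reduce to one pipeline call. In the sketch below the checkpoint, scheduler choice and sampler settings are copied from the test; which caption plays `prompt` (the edit target) versus `source_prompt` (the input description) is an assumption, since the dump above assigns both strings to the same placeholder:

```python
# Sketch: text-guided image-to-image editing with CycleDiffusion.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

image = pipe(
    prompt="A blue colored car",          # assumed edit target
    source_prompt="A black colored car",  # assumed description of init_image
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
).images[0]
image.save("blue_colored_car.png")
```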
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
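Both this module and the X-CLIP one above follow the same recipe: declare an import table, guard optional backends, and hand everything to `_LazyModule`. Reduced to its essence (a standalone sketch, not the real `_LazyModule` implementation), the mechanism is a module-level `__getattr__`:

```python
# Sketch: the core of the lazy-import pattern, via a PEP 562 module __getattr__.
# Meant to live inside a package __init__.py; the table entries are illustrative.
import importlib

_import_structure = {
    "configuration_xglm": ["XGLMConfig"],
    "modeling_xglm": ["XGLMModel", "XGLMForCausalLM"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    module_name = _name_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # the heavy submodule is imported only when one of its names is first touched
    module = importlib.import_module(f".{module_name}", __package__)
    return getattr(module, name)
```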
| 251 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
default_checkpoint = """facebook/bart-large-mnli"""
description = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
name = """text_classifier"""
pre_processor_class = AutoTokenizer
model_class = AutoModelForSequenceClassification
inputs = ["""text""", ["""text"""]]
outputs = ["""text"""]
def setup ( self )-> None:
'''simple docstring'''
super().setup()
config = self.model.config
self.entailment_id = -1
for idx, label in config.id2label.items():
if label.lower().startswith('entail' ):
self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def encode ( self , text , labels )-> Any:
'''simple docstring'''
self._labels = labels
return self.pre_processor(
[text] * len(labels ) , [F'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def decode ( self , outputs )-> str:
'''simple docstring'''
logits = outputs.logits
label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
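A hedged usage sketch of the tool above: `load_tool` with the task name is the usual entry point in the agents API (an assumption here, since this file only defines the tool class), and the sentence and labels are arbitrary:

```python
# Sketch: zero-shot classification through the tool interface.
from transformers import load_tool

classifier = load_tool("text-classification")
result = classifier(
    "This new laptop is fast and the battery lasts all day.",
    labels=["positive", "negative"],
)
print(result)  # expected: "positive"
```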
| 251 | 1 |
'''simple docstring'''
import math
class Graph:
'''simple docstring'''
def __init__( self ,n=0 ) -> None: # a graph with Node 0,1,...,N-1
self.n = n
self.w = [
[math.inf for j in range(0 ,n )] for i in range(0 ,n )
] # adjacency matrix for weight
self.dp = [
[math.inf for j in range(0 ,n )] for i in range(0 ,n )
] # dp[i][j] stores minimum distance from i to j
def add_edge( self ,u ,v ,w ) -> None:
self.dp[u][v] = w
def floyd_warshall( self ) -> None:
for k in range(0 ,self.n ):
for i in range(0 ,self.n ):
for j in range(0 ,self.n ):
self.dp[i][j] = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] )
def show_min( self ,u ,v ) -> float:
return self.dp[u][v]
if __name__ == "__main__":
graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
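Two hand-computed checks for the example graph above: the shortest 1→4 route is 1→3→4 with cost 5+6=11, and the shortest 0→3 route is 0→2→3 with cost 9+7=16.

```python
# Hand-checked shortest-path values for the example graph above.
assert graph.show_min(1, 4) == 11  # 1 -(5)-> 3 -(6)-> 4
assert graph.show_min(0, 3) == 16  # 0 -(9)-> 2 -(7)-> 3
```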
| 37 |
'''simple docstring'''
from math import sqrt
def is_prime( number ):
"""simple docstring"""
assert isinstance(number , int ) and (
number >= 0
), "'number' must been an int and positive"
status = True
# 0 and 1 are none primes.
if number <= 1:
status = False
for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
status = False
break
# precondition
assert isinstance(status , bool ), "'status' must been from type bool"
return status
def sieve_er( n ):
"""simple docstring"""
assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
begin_list = list(range(2 , n + 1 ) )
ans = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(begin_list ) ):
for j in range(i + 1 , len(begin_list ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
begin_list[j] = 0
# filters actual prime numbers.
ans = [x for x in begin_list if x != 0]
# precondition
assert isinstance(ans , list ), "'ans' must been from type list"
return ans
def get_prime_numbers( n ):
"""simple docstring"""
assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
ans = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(number ):
ans.append(number )
# precondition
assert isinstance(ans , list ), "'ans' must been from type list"
return ans
def prime_factorization( number ):
"""simple docstring"""
assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
ans = [] # this list will be returned by the function.
# potential prime number factors.
factor = 2
quotient = number
if number == 0 or number == 1:
ans.append(number )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(number ):
while quotient != 1:
if is_prime(factor ) and (quotient % factor == 0):
ans.append(factor )
quotient /= factor
else:
factor += 1
else:
ans.append(number )
# precondition
assert isinstance(ans , list ), "'ans' must been from type list"
return ans
def greatest_prime_factor( number ):
"""simple docstring"""
assert isinstance(number , int ) and (
number >= 0
), "'number' must been an int and >= 0"
ans = 0
# prime factorization of 'number'
prime_factors = prime_factorization(number )
ans = max(prime_factors )
# precondition
assert isinstance(ans , int ), "'ans' must been from type int"
return ans
def smallest_prime_factor( number ):
"""simple docstring"""
assert isinstance(number , int ) and (
number >= 0
), "'number' must been an int and >= 0"
ans = 0
# prime factorization of 'number'
prime_factors = prime_factorization(number )
ans = min(prime_factors )
# precondition
assert isinstance(ans , int ), "'ans' must been from type int"
return ans
def is_even( number ):
"""simple docstring"""
assert isinstance(number , int ), "'number' must been an int"
assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
return number % 2 == 0
def is_odd( number ):
"""simple docstring"""
assert isinstance(number , int ), "'number' must been an int"
assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
return number % 2 != 0
def goldbach( number ):
"""simple docstring"""
assert (
isinstance(number , int ) and (number > 2) and is_even(number )
), "'number' must been an int, even and > 2"
ans = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
prime_numbers = get_prime_numbers(number )
len_pn = len(prime_numbers )
# run variable for while-loops.
i = 0
j = None
# exit variable. for break up the loops
loop = True
while i < len_pn and loop:
j = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
loop = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(ans , list )
and (len(ans ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contain two primes whose sum equals 'number'"
return ans
def gcd( number1 , number2 ):
"""simple docstring"""
assert (
isinstance(number1 , int )
and isinstance(number2 , int )
and (number1 >= 0)
and (number2 >= 0)
), "'number1' and 'number2' must been positive integer."
rest = 0
while number2 != 0:
rest = number1 % number2
number1 = number2
number2 = rest
# precondition
assert isinstance(number1 , int ) and (
number1 >= 0
), "'number' must been from type int and positive"
return number1
def kg_v( number1 , number2 ):
"""simple docstring"""
assert (
isinstance(number1 , int )
and isinstance(number2 , int )
and (number1 >= 1)
and (number2 >= 1)
), "'number1' and 'number2' must been positive integer."
ans = 1 # actual answer that will be returned.
# for kgV (x,1)
if number1 > 1 and number2 > 1:
# builds the prime factorization of 'number1' and 'number2'
prime_fac_1 = prime_factorization(number1 )
prime_fac_2 = prime_factorization(number2 )
elif number1 == 1 or number2 == 1:
prime_fac_1 = []
prime_fac_2 = []
ans = max(number1 , number2 )
count1 = 0
count2 = 0
done = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_1:
if n not in done:
if n in prime_fac_2:
count1 = prime_fac_1.count(n )
count2 = prime_fac_2.count(n )
for _ in range(max(count1 , count2 ) ):
ans *= n
else:
count1 = prime_fac_1.count(n )
for _ in range(count1 ):
ans *= n
done.append(n )
# iterates through primeFac2
for n in prime_fac_2:
if n not in done:
count2 = prime_fac_2.count(n )
for _ in range(count2 ):
ans *= n
done.append(n )
# precondition
assert isinstance(ans , int ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def get_prime( n ):
"""simple docstring"""
assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
index = 0
ans = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(ans ):
ans += 1
# precondition
assert isinstance(ans , int ) and is_prime(
ans ), "'ans' must been a prime number and from type int"
return ans
def get_primes_between( p_number_1 , p_number_2 ):
"""simple docstring"""
assert (
is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
number = p_number_1 + 1 # jump to the next number
ans = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(number ):
number += 1
while number < p_number_2:
ans.append(number )
number += 1
# fetch the next prime number.
while not is_prime(number ):
number += 1
# precondition
assert (
isinstance(ans , list )
and ans[0] != p_number_1
and ans[len(ans ) - 1] != p_number_2
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def get_divisors( n ):
"""simple docstring"""
assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
ans = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(divisor )
# precondition
assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisors(...)"
return ans
def is_perfect_number( number ):
"""simple docstring"""
assert isinstance(number , int ) and (
number > 1
), "'number' must been an int and >= 1"
divisors = get_divisors(number )
# precondition
assert (
isinstance(divisors , list )
and (divisors[0] == 1)
and (divisors[len(divisors ) - 1] == number)
), "Error in help-function getDivisors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def simplify_fraction( numerator , denominator ):
"""simple docstring"""
assert (
isinstance(numerator , int )
and isinstance(denominator , int )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
# precondition
assert (
isinstance(gcd_of_fraction , int )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial( n ):
"""simple docstring"""
assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
ans = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def fib( n ):
"""simple docstring"""
assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
tmp = 0
fib1 = 1
ans = 1 # this will be returned
for _ in range(n - 1 ):
tmp = ans
ans += fib1
fib1 = tmp
return ans
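A few hand-checked calls tie the helpers together (a usage sketch; run after the definitions above):

```python
# Sketch: exercising the number-theory helpers with hand-checked values.
print(is_prime(97))               # True
print(sieve_er(20))               # [2, 3, 5, 7, 11, 13, 17, 19]
print(prime_factorization(360))   # [2, 2, 2, 3, 3, 5]
print(gcd(24, 36))                # 12
print(kg_v(4, 6))                 # 12
print(goldbach(28))               # [5, 23]
print(simplify_fraction(16, 24))  # (2, 3)
print(fib(10))                    # 89 with this implementation's indexing
```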
| 37 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline ( DiffusionPipeline ):
def __init__( self , transformer , vae , scheduler , id2label = None , ) -> None:
'''simple docstring'''
super().__init__()
self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler)
# create a imagenet -> id dictionary for easier use
self.labels = {}
if id2label is not None:
for key, value in id2label.items():
for label in value.split(','):
self.labels[label.lstrip()] = int(key)
self.labels = dict(sorted(self.labels.items()))
def get_label_ids( self , label) -> List[int]:
'''simple docstring'''
if not isinstance(label , list):
label = list(label)
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.')
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , class_labels , guidance_scale = 4.0 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
batch_size = len(class_labels)
latent_size = self.transformer.config.sample_size
latent_channels = self.transformer.config.in_channels
latents = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
class_labels = torch.tensor(class_labels , device=self.device).reshape(-1)
class_null = torch.tensor([10_00] * batch_size , device=self.device)
class_labels_input = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
half = latent_model_input[: len(latent_model_input) // 2]
latent_model_input = torch.cat([half, half] , dim=0)
latent_model_input = self.scheduler.scale_model_input(latent_model_input , t)
timesteps = t
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = latent_model_input.device.type == 'mps'
if isinstance(timesteps , float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
noise_pred = self.transformer(
latent_model_input , timestep=timesteps , class_labels=class_labels_input).sample
# perform guidance
if guidance_scale > 1:
eps , rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
cond_eps , uncond_eps = torch.split(eps , len(eps) // 2 , dim=0)
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
eps = torch.cat([half_eps, half_eps] , dim=0)
noise_pred = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
model_output , _ = torch.split(noise_pred , latent_channels , dim=1)
else:
model_output = noise_pred
# compute previous image: x_t -> x_t-1
latent_model_input = self.scheduler.step(model_output , t , latent_model_input).prev_sample
if guidance_scale > 1:
latents , _ = latent_model_input.chunk(2 , dim=0)
else:
latents = latent_model_input
latents = 1 / self.vae.config.scaling_factor * latents
samples = self.vae.decode(latents).sample
samples = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
samples = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
samples = self.numpy_to_pil(samples)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=samples)
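End to end, the pipeline above is typically driven like the sketch below. The checkpoint name is the published DiT model, and swapping in `DPMSolverMultistepScheduler` is an assumption borrowed from common usage rather than anything this file mandates:

```python
# Sketch: class-conditional 256x256 generation with the DiT pipeline.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["golden retriever"])  # ImageNet label -> class id
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("golden_retriever.png")
```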
| 362 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowercase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
lowercase__ = {
'unc-nlp/lxmert-base-uncased': 512,
}
lowercase__ = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LxmertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
'''simple docstring'''
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , do_lower_case) != do_lower_case
or normalizer_state.get('strip_accents' , strip_accents) != strip_accents
or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('type'))
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None) -> List[int]:
'''simple docstring'''
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences ( self , token_ids_0 , token_ids_1 = None) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary ( self , save_directory , filename_prefix = None) -> Tuple[str]:
'''simple docstring'''
files = self._tokenizer.model.save(save_directory , name=filename_prefix)
return tuple(files)
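A usage sketch for the fast tokenizer above — encoding a sentence pair so the `token_type_ids` produced by `create_token_type_ids_from_sequences` are visible:

```python
# Sketch: tokenizing a sentence pair with the pretrained LXMERT tokenizer.
from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoded = tokenizer("who is driving?", "a man in a red shirt")
print(encoded["input_ids"])       # [CLS] ... [SEP] ... [SEP] ids
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second
```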
| 203 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : str = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig (PretrainedConfig ):
"""simple docstring"""
model_type ='vit_mae'
def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.decoder_num_attention_heads = decoder_num_attention_heads
self.decoder_hidden_size = decoder_hidden_size
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.decoder_intermediate_size = decoder_intermediate_size
self.mask_ratio = mask_ratio
self.norm_pix_loss = norm_pix_loss
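A short sketch of the config in use — overriding the masking ratio and reading back a derived decoder value; the model-class pairing is the standard ViT-MAE one:

```python
# Sketch: building a ViT-MAE config with a custom mask ratio.
from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig(mask_ratio=0.5, norm_pix_loss=True)
model = ViTMAEModel(config)  # randomly initialized, not pretrained
print(config.mask_ratio, config.decoder_hidden_size)  # 0.5 512
```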
| 63 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , backbone_config=None , num_queries=9_00 , max_position_embeddings=20_48 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=3_00 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        # note: `return_intermediate` is a best-effort name for the 19th positional default (True)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
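# The tester helper below builds a tiny ResNet config and random inputs so every test runs quickly on CPU.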
class TFResNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ) -> List[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> str:
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # flag names follow the common TF tester-mixin conventions
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp( self ) -> str:
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config( self ) -> int:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> Optional[int]:
        return
    @unittest.skip(reason='ResNet does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> Any:
        pass
    @unittest.skip(reason='ResNet does not support input and output embeddings' )
    def test_model_common_attributes( self ) -> Optional[int]:
        pass
    def test_forward_signature( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> List[Any]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Tuple:
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> Dict:
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> Union[str, Any]:
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ) -> Optional[Any]:
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 104 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=6_4 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=3_0 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=2_0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split( Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ) -> Any:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        ' future run' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> List[Any]:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'is_impossible': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 104 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
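# `_LazyModule` below defers these submodule imports until an attribute is first accessed,
# which keeps importing the package cheap.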
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 | """simple docstring"""
from __future__ import annotations
def min_path_sum( matrix: list[list[int]] ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
    for i in range(1 , len(matrix ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
    for i in range(1 , len(matrix ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
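# Each configuration in the parameterized_class list below launches a real SageMaker training job
# and asserts runtime and metric budgets against the recorded `results`.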
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest( unittest.TestCase ):
    def setUp(self ):
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )
    def create_estimator(self , instance_count ):
        """simple docstring"""
        job_name = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator (debugger hook disabled, matching the reference test setup)
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def save_results_as_csv(self , job_name ):
        """simple docstring"""
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
    def test_script(self , instance_count ):
        """simple docstring"""
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile )
| 143 | from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig( PretrainedConfig ):
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__(self , activation_dropout = 0.1 , activation_function = "gelu" , vocab_size = 3_0_5_2_2 , hidden_size = 1_0_2_4 , encoder_ffn_dim = 4_0_9_6 , num_encoder_layers = 1_2 , num_encoder_attention_heads = 1_6 , decoder_ffn_dim = 4_0_9_6 , num_decoder_layers = 1_2 , num_decoder_attention_heads = 1_6 , attention_dropout = 0.1 , dropout = 0.1 , max_position_embeddings = 5_1_2 , init_std = 0.02 , is_encoder_decoder = True , add_cross_attention = True , decoder_start_token_id = 0 , ngram = 2 , num_buckets = 3_2 , relative_max_distance = 1_2_8 , disable_ngram_loss = False , eps = 0.0 , use_cache = True , pad_token_id = 0 , bos_token_id = 1 , eos_token_id = 2 , **kwargs , ) -> List[str]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
    @property
    def num_hidden_layers(self ) -> int:
        """simple docstring"""
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers(self , value ):
        """simple docstring"""
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." )
| 143 | 1 |
def longest_common_subsequence(x , y ):
    """simple docstring"""
    assert x is not None
    assert y is not None
    m = len(x )
    n = len(y )
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    # walk back from the bottom-right cell to reconstruct one longest common subsequence
    seq = ''''''
    i , j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = '''AGGTAB'''
    b = '''GXTXAYB'''
    expected_ln = 4
    expected_subseq = '''GTAB'''
    ln , subseq = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
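    # the DP table costs O(m*n) time and space; the traceback that recovers the subsequence adds O(m+n)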
| 5 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
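# Evaluates a causal LM on OpenAI's HumanEval: every prompt is completed multiple times, each
# generation is truncated at the first end-of-function marker, and pass@k is computed with the
# `code_eval` metric.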
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset( IterableDataset ):
    def __init__(self , tokenizer , dataset , n_tasks=None , n_copies=1 ) -> Dict:
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self ) -> Optional[Any]:
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors='''pt''' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria( StoppingCriteria ):
    def __init__(self , start_length , eof_strings , tokenizer ) -> Optional[Any]:
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self , input_ids , scores , **kwargs ) -> Dict:
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ) -> Optional[Any]:
    """simple docstring"""
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ) -> Tuple:
    """simple docstring"""
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            # tell the stopping criterion where the prompt ends (the obfuscated source dropped this left-hand side)
            gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main() -> str:
    """simple docstring"""
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = '''false'''
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''' )
    code_eval_metric = load_metric('''code_eval''' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['''test'''] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception
    model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F"check({human_eval['test'][task]['entry_point']})"
            references.append('''\n''' + test_func + '''\n''' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k , _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(F"Results: {pass_at_k}" )
        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 5 | 1 |
"""simple docstring"""
from __future__ import annotations
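# shear stress relates force and area by tau = F / A; pass the unknown quantity as 0 and the other two as non-negative values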
def shear_stress( stress: float , tangential_force: float , area: float , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "distilbert"
lowercase_ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
    def __init__(self , vocab_size=30_522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) ->Any:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id)
class DistilBertOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs(self ) ->Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 273 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
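# This script exercises Accelerate's gradient-synchronisation guarantees: `no_sync` must be a
# no-op on a single device, gradients must only sync on step iterations under `accumulate`, and
# optimizer/scheduler state must stay aligned between the manual and the wrapped training loop.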
def check_model_parameters( model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model( model, input, target, accelerator, do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output, target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator, sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset, batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters(), lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1E-3 )
        sched = LambdaLR(opt, lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model, dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model, ddp_input, ddp_target, accelerator )
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration )
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model, ddp_input, ddp_target, accelerator )
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is False
                ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is True
                ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False, dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is True
                ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is False
                ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False, dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator, True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset, batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset, batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader, second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation(split_batch, dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches )
def _mp_fn( index ):
    # entry point for spawned processes (e.g. TPU); the index argument is required by the spawner but unused
    main()
if __name__ == "__main__":
main()
| 107 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
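# "punkt" provides the pretrained Punkt sentence tokenizer used by nltk.sent_tokenize below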
def add_newline_to_end_of_each_sentence( text: str ) -> str:
    '''simple docstring'''
    text = re.sub("<n>" , "" , text )  # remove pegasus newline char (the original discarded this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text ) )
| 22 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
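# The tests below cover resizing/rescaling/normalisation/padding plus COCO detection and
# panoptic annotation handling of the image processor.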
class DeformableDetrImageProcessingTester (unittest.TestCase ):
"""simple docstring"""
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,min_resolution=3_0 ,max_resolution=4_0_0 ,do_resize=True ,size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,do_rescale=True ,rescale_factor=1 / 2_5_5 ,do_pad=True ,):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self ,image_inputs ,batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image ,Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values ,key=lambda item : item[0] )[0]
            expected_width = max(expected_values ,key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest (ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'''image_mean''' ) )
        self.assertTrue(hasattr(image_processing ,'''image_std''' ) )
        self.assertTrue(hasattr(image_processing ,'''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing ,'''do_resize''' ) )
        self.assertTrue(hasattr(image_processing ,'''do_rescale''' ) )
        self.assertTrue(hasattr(image_processing ,'''do_pad''' ) )
        self.assertTrue(hasattr(image_processing ,'''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad ,True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict ,size=4_2 ,max_size=8_4 ,pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
        self.assertEqual(image_processor.do_pad ,False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs ,batched=True )
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs ,batched=True )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs ,batched=True )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' ,'''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image ,annotations=target ,return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['''pixel_values'''].shape ,expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,expected_slice ,atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,expected_shape )
        expected_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,expected_slice ,atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
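        # Added note: the (800, 1066) size asserted in both slow tests comes from
        # DETR-style shortest-edge resizing -- the 480x640 COCO image has its short
        # side scaled to 800 and the long side follows the aspect ratio
        # (640 * 800 / 480 = 1066.67, truncated to 1066).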
| 52 |
'''simple docstring'''
lowerCAmelCase__ = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units via the joule factors in the table above."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
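# Worked example (added for illustration; relies only on the table above):
# 1 kilowatt-hour is 3.6e6 joules, so a round trip returns the input.
#
#     >>> energy_conversion("kilowatthour", "joule", 1.0)
#     3600000.0
#     >>> energy_conversion("joule", "kilowatthour", 3_600_000.0)
#     1.0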
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
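# Illustrative end-to-end sketch (added; it mirrors the slow test above rather
# than documenting a separate API, and assumes the same public Kandinsky 2.1
# checkpoints plus a PIL `init_image`):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
#     pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     frog = pipe(
#         "A red cartoon frog, 4k",
#         image=init_image,
#         image_embeds=image_embeds,
#         negative_image_embeds=negative_image_embeds,
#         strength=0.2,
#     ).images[0]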
| 62 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
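# Added note on the aliases above: NestedDataStructureLike[T] accepts a bare
# value, a list of values, or a dict of values -- e.g. "a.txt",
# ["a.txt", "b.txt"] and {"train": "a.txt"} all satisfy
# NestedDataStructureLike[PathLike].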
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index ``i`` to ``val``."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the elements in the inclusive range [i, j] with ``fn``."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
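# Added note: _build_tree() is O(n), while update() and query_range() are
# O(log n) because each walks a single root-to-leaf path. A quick sketch with
# a different combining function:
#
#     tree = SegmentTree([5, 2, 8], min)
#     assert tree.query_range(0, 2) == 2
#     tree.update(1, 9)
#     assert tree.query_range(0, 2) == 5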
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 358 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, registering both endpoints."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices) < len(self.vertices):
            # sentinel strictly larger than every real weight
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def snake_case ( A__ = "p107_network.txt" ):
UpperCAmelCase_ : str = os.path.abspath(os.path.dirname(A__ ) )
UpperCAmelCase_ : str = os.path.join(A__ ,A__ )
UpperCAmelCase_ : dict[EdgeT, int] = {}
UpperCAmelCase_ : list[str]
UpperCAmelCase_ : int
UpperCAmelCase_ : int
with open(A__ ) as f:
UpperCAmelCase_ : Dict = f.read().strip().split("\n" )
UpperCAmelCase_ : str = [line.split("," ) for line in data]
for edgea in range(1 ,len(A__ ) ):
for edgea in range(A__ ):
if adjaceny_matrix[edgea][edgea] != "-":
UpperCAmelCase_ : Union[str, Any] = int(adjaceny_matrix[edgea][edgea] )
UpperCAmelCase_ : Graph = Graph(set(range(len(A__ ) ) ) ,A__ )
UpperCAmelCase_ : Graph = graph.prims_algorithm()
UpperCAmelCase_ : int = sum(graph.edges.values() )
UpperCAmelCase_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
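# Added sketch on a hypothetical 4-vertex network, showing the saving that
# solution() reports (total weight minus minimum-spanning-tree weight):
#
#     g = Graph({0, 1, 2, 3}, {(0, 1): 1, (1, 2): 2, (0, 2): 10, (2, 3): 3})
#     mst = g.prims_algorithm()
#     assert sum(mst.edges.values()) == 6   # keeps (0,1), (1,2), (2,3)
#     assert sum(g.edges.values()) - sum(mst.edges.values()) == 10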
if __name__ == "__main__":
print(f'{solution() = }')
| 253 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
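# Added usage sketch for the processor above: it never pads, it floors each
# spatial dimension to the nearest multiple of size_divisor, so a 65x97 image
# comes out as 64x96.
#
#     import numpy as np
#     processor = GLPNImageProcessor(size_divisor=32)
#     batch = processor.preprocess(np.zeros((65, 97, 3), dtype=np.uint8), return_tensors="np")
#     assert batch["pixel_values"].shape == (1, 3, 64, 96)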
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # polynomial multiplication over the dual coefficients
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
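# Added worked examples: f(y) = y**6 has f''(9) = 30 * 9**4 = 196830, which is
# what the print() in the main block below reports; a first derivative falls
# out the same way:
#
#     >>> differentiate(lambda y: y**2, 3, 1)
#     6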
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
"""
Project Euler Problem 5: https://projecteuler.net/problem=5
Smallest positive number evenly divisible by all of the numbers from 1 to 20.
"""


def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
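# Added check: the Project Euler statement gives solution(10) == 2520, the
# smallest positive number evenly divisible by every integer from 1 to 10.
#
#     >>> solution(10)
#     2520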
if __name__ == "__main__":
print(F"""{solution() = }""")
| 360 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether ``n`` can go at (row, column) without repeating in the
    row, the column, or the enclosing 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # undo the tentative placement on a dead end
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
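# Added examples of the is_safe() contract on the first grid above: row 0
# already holds a 5, while placing a 1 at (0, 1) conflicts with nothing.
#
#     >>> is_safe(initial_grid, 0, 1, 1)
#     True
#     >>> is_safe(initial_grid, 0, 1, 5)
#     False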
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
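# Added note: both variants take O(log(min(a, b))) division steps; the worst
# case is a pair of consecutive Fibonacci numbers, e.g.
#
#     >>> euclidean_gcd(89, 55)
#     1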
def A_ ( ):
"""simple docstring"""
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 253 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 8, 128, 16) )
SCREAMING_SNAKE_CASE_ : Tuple = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
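    # Illustrative inference sketch (added): assumes the same public
    # 'cvssp/audioldm' checkpoint the slow tests above exercise.
    #
    #     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
    #     audio = pipe(
    #         "A hammer hitting a wooden surface", num_inference_steps=10
    #     ).audios[0]
    #     # `audio` is a 1-D numpy waveform at pipe.vocoder.config.sampling_rate Hz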
| 253 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
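# Added note: with sys.argv[1:] == ["src", "tests"], the compiled pattern is
# r"^(src|tests).*?\.py$", so "src/a.py" matches while "docs/a.py" and
# "src/a.txt" do not; the space-joined result feeds straight into Makefile
# commands, hence end="" to avoid a trailing newline.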
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def UpperCamelCase( lowercase_ = "" ) -> dict[str, float]:
'''simple docstring'''
snake_case_ = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
snake_case_ = BeautifulSoup(requests.get(lowercase_ ).text , """html.parser""" )
snake_case_ = soup.find_all("""td""" , attrs="""titleColumn""" )
snake_case_ = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase_ , lowercase_ )
}
def UpperCamelCase( lowercase_ = "IMDb_Top_250_Movies.csv" ) -> None:
'''simple docstring'''
snake_case_ = get_imdb_top_aaa_movies()
with open(lowercase_ , """w""" , newline="""""" ) as out_file:
snake_case_ = csv.writer(lowercase_ )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
    write_movies()
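# Added usage note: write_movies("top250.csv") emits rows such as
#     The Shawshank Redemption,9.3
# Scraping like this is fragile -- if IMDb changes the <td> classes the parser
# targets, get_imdb_top_250_movies() silently returns an empty dict.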
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to the HF naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
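# Added worked example of the key mapping above (pure string rewriting):
#
#     >>> rename_key("model.transformer_0.mha.W_q.weight")
#     'yoso.encoder.layer.0.attention.self.query.weight'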
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 266 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
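# Added note: each timed call above returns elapsed seconds via the
# @get_duration decorator, so RESULTS_FILE_PATH ends up holding a flat JSON
# mapping, roughly {"num examples": 500000, "map identity": <sec>, ...,
# "filter": <sec>}.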
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 327 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a :
"""simple docstring"""
pass
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 81 | 1 |
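For orientation, a minimal sketch of the predictor update that the step method above implements, kept separate from the class API. It assumes scalar sigmas and a plain tensor sample; the names and shapes here are illustrative, not part of the scheduler.

import torch

def sde_ve_predictor_step(sample, score, sigma, adjacent_sigma, generator=None):
    # g(t) from the discretized variance-exploding SDE
    diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
    # reverse-SDE drift: -g(t)^2 * score, where score approximates grad_x log p_t(x)
    drift = -(diffusion**2) * score
    noise = torch.randn(sample.shape, generator=generator, dtype=sample.dtype)
    prev_sample_mean = sample - drift  # subtract because dt is a small negative timestep
    return prev_sample_mean + diffusion * noise, prev_sample_mean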
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = torch.permute(snake_case__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ):
# linear layer
A : Any = flax_key_tuple[:-1] + ('''weight''',)
A : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : int = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if "metadata" in layer:
A : Union[str, Any] = layer.split('''metadata''' )
A : List[Any] = ''''''.join(split_layer[0] )[:-1]
A : int = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
A : Any = layer.split('''kvstore''' )
A : List[Any] = ''''''.join(split_layer[0] )[:-1]
A : Optional[Any] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
A : Union[str, Any] = layer.split('''/''' )
A : Optional[int] = '''/'''.join(split_layer[:-1] )
A : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
A : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
A : int = '''file'''
else:
A : Any = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[int] = rename_keys(snake_case__ )
A : Tuple = {}
for k, v in current_block.items():
A : Tuple = v
A : List[str] = new_current_block
torch.save(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = WEIGHTS_NAME ):
'''simple docstring'''
A : Dict = convert_file_size_to_int(snake_case__ )
A : str = []
A : str = {}
A : Any = 0
A : List[str] = 0
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
A : Tuple = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
A : List[Any] = flatten_dict(snake_case__ , sep='''/''' )
A : List[str] = {}
for layer in checkpoint_info.keys():
A, A, A : Tuple = get_key_and_tensorstore_dict(
snake_case__ , snake_case__ , snake_case__ )
if curr_real_layer_name in all_layers:
A : List[str] = content
else:
A : Optional[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A : List[Any] = torch.tensor(snake_case__ )
A : int = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A, A : int = rename_base_flax_keys(tuple(key.split('''/''' ) ) , snake_case__ )
A : Union[str, Any] = '''/'''.join(snake_case__ )
# If this weight would tip the current block over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A : List[Any] = os.path.join(
snake_case__ , weights_name.replace('''.bin''' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) )
rename_and_save_block(snake_case__ , snake_case__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A : Dict = {}
A : List[Any] = 0
A : List[Any] = raw_weights.to(getattr(snake_case__ , snake_case__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A : Optional[int] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) )
rename_and_save_block(snake_case__ , snake_case__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(snake_case__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A : Union[str, Any] = {}
A : List[str] = {}
for idx, shard in enumerate(snake_case__ ):
A : int = weights_name.replace(
'''.bin''' , F'-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin' ) # len(sharded_state_dicts):05d}
A : Union[str, Any] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
A : str = shard
for key in shard:
A : Tuple = shard_file
# Add the metadata
A : Tuple = {'''total_size''': total_size}
A : Optional[int] = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' ) as f:
A : Union[str, Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
return metadata, index
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowercase : List[str] = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCAmelCase_ ( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
A : Any = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
A : Any = TaTokenizer.from_pretrained('''t5-small''' )
A : Union[str, Any] = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
A : List[Any] = tokenizer(snake_case__ , return_tensors='''pt''' ).input_ids
A : Optional[Any] = model.generate(snake_case__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 3 |
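The sharding loop above follows a simple greedy policy. As a hedged, self-contained sketch (names are illustrative, and the real script also renames keys and rewrites shard filenames afterwards):

import torch

def greedy_shard(state_dict, max_shard_size):
    # Pack tensors into blocks; flush a block once adding the next tensor
    # would exceed max_shard_size (in bytes).
    shards, block, block_size = [], {}, 0
    for name, tensor in state_dict.items():
        size = tensor.numel() * tensor.element_size()
        if block and block_size + size > max_shard_size:
            shards.append(block)
            block, block_size = {}, 0
        block[name] = tensor
        block_size += size
    if block:
        shards.append(block)
    return shards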
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
return self.node_position[vertex]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = pos
def snake_case ( self , __a , __a , __a , __a ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCAmelCase = 2 * start + 1
else:
__lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
__lowerCAmelCase , __lowerCAmelCase = (
heap[start],
positions[start],
)
__lowerCAmelCase , __lowerCAmelCase = temp, tempa
__lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __a )
self.top_to_bottom(__a , __a , __a , __a )
def snake_case ( self , __a , __a , __a , __a ):
__lowerCAmelCase = position[index]
while index != 0:
__lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCAmelCase = heap[parent]
__lowerCAmelCase = position[parent]
self.set_position(position[parent] , __a )
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , __a )
break
__lowerCAmelCase = parent
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , 0 )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = len(__a ) // 2 - 1
for i in range(__a , -1 , -1 ):
self.top_to_bottom(__a , __a , len(__a ) , __a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = positions[0]
__lowerCAmelCase = sys.maxsize
self.top_to_bottom(__a , 0 , len(__a ) , __a )
return temp
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = Heap()
__lowerCAmelCase = [0] * len(_UpperCamelCase )
__lowerCAmelCase = [-1] * len(_UpperCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCAmelCase = []
for vertex in range(len(_UpperCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_UpperCamelCase )
heap.node_position.append(_UpperCamelCase )
__lowerCAmelCase = []
__lowerCAmelCase = 1
__lowerCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCAmelCase = 0
__lowerCAmelCase = distance
heap.heapify(_UpperCamelCase , _UpperCamelCase )
for _ in range(1 , len(_UpperCamelCase ) ):
__lowerCAmelCase = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_UpperCamelCase )]
):
__lowerCAmelCase = distance
heap.bottom_to_top(
_UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A : Optional[Any] = int(input("Enter number of edges: ").strip())
A : Dict = defaultdict(list)
for _ in range(edges_number):
A : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 57 | 0 |
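The same algorithm can be written more compactly with the standard-library heap. This sketch assumes the adjacency_list shape built in the __main__ block above (vertex -> list of [neighbor, weight] pairs) and returns the tree edges:

import heapq

def prim_mst(adjacency_list, start=0):
    visited = {start}
    frontier = [(weight, start, neighbor) for neighbor, weight in adjacency_list[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry, a cheaper edge already reached v
        visited.add(v)
        tree_edges.append((u, v))
        for nxt, nw in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (nw, v, nxt))
    return tree_edges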
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =['pixel_values']
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase, param_name='''crop_size''' )
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =resample
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ =do_convert_rgb
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ =size if size is not None else self.size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''size''', default_to_square=lowerCAmelCase )
lowerCamelCase_ =resample if resample is not None else self.resample
lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''crop_size''', default_to_square=lowerCAmelCase )
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ =image_std if image_std is not None else self.image_std
lowerCamelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ =[convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowerCamelCase_ ={'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
| 6 |
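Stripped of the configuration plumbing, the preprocess method above applies center crop, rescale, normalize, and a channel transpose in that order (the shortest-edge resize is omitted here for brevity). A rough NumPy sketch with illustrative values rather than the class defaults:

import numpy as np

def preprocess(image, mean, std, crop=224, scale=1 / 255):
    pixels = np.asarray(image, dtype=np.float32)           # HWC array
    h, w = pixels.shape[:2]
    top, left = (h - crop) // 2, (w - crop) // 2
    pixels = pixels[top : top + crop, left : left + crop]  # center crop
    pixels = pixels * scale                                # rescale to [0, 1]
    pixels = (pixels - np.array(mean)) / np.array(std)     # per-channel normalize
    return np.transpose(pixels, (2, 0, 1))                 # HWC -> CHW (ChannelDimension.FIRST)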
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor()  # the v1/v2 processor variants collapse to one name in this sample
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
| 6 | 1 |
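The cache handling in __init__ boils down to a lock-then-load-or-build idiom. In isolation (the path and the build callable are illustrative; the overwrite_cache flag above is not modeled):

import os
import torch
from filelock import FileLock

def load_or_build(cache_path, build_fn):
    # Only one process builds the cache; the others block on the lock and then load it.
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        data = build_fn()
        torch.save(data, cache_path)
        return data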
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger()
@dataclass
class snake_case :
a_ : nn.Module
a_ : List[nn.Module] = field(default_factory=SCREAMING_SNAKE_CASE_ )
a_ : list = field(default_factory=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = len(list(m.modules())) == 1 or isinstance(__UpperCAmelCase , nn.Convad) or isinstance(__UpperCAmelCase , nn.BatchNormad)
if has_not_submodules:
self.traced.append(__UpperCAmelCase)
def __call__( self , __UpperCAmelCase) ->Optional[int]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(__UpperCAmelCase)
[x.remove() for x in self.handles]
return self
@property
def UpperCAmelCase__ ( self) ->int:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda __UpperCAmelCase: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class snake_case :
a_ : nn.Module
a_ : nn.Module
a_ : int = 0
a_ : List = field(default_factory=SCREAMING_SNAKE_CASE_ )
a_ : List = field(default_factory=SCREAMING_SNAKE_CASE_ )
def __call__( self , __UpperCAmelCase) ->int:
a_ = Tracker(self.dest)(__UpperCAmelCase).parametrized
a_ = Tracker(self.src)(__UpperCAmelCase).parametrized
a_ = list(filter(lambda __UpperCAmelCase: type(__UpperCAmelCase) not in self.src_skip , __UpperCAmelCase))
a_ = list(filter(lambda __UpperCAmelCase: type(__UpperCAmelCase) not in self.dest_skip , __UpperCAmelCase))
if len(__UpperCAmelCase) != len(__UpperCAmelCase):
raise Exception(
F'''Numbers of operations are different. Source module has {len(__UpperCAmelCase)} operations while'''
F''' destination module has {len(__UpperCAmelCase)}.''')
for dest_m, src_m in zip(__UpperCAmelCase , __UpperCAmelCase):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''')
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True ) ->Any:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
a_ = timm.create_model(UpperCAmelCase , pretrained=UpperCAmelCase ).eval()
a_ = ResNetForImageClassification(UpperCAmelCase ).eval()
a_ = ModuleTransfer(src=UpperCAmelCase , dest=UpperCAmelCase )
a_ = torch.randn((1, 3, 224, 224) )
module_transfer(UpperCAmelCase )
assert torch.allclose(from_model(UpperCAmelCase ) , our_model(UpperCAmelCase ).logits ), "The model logits don't match the original one."
a_ = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=UpperCAmelCase , )
# we can use the convnext one
a_ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=UpperCAmelCase , )
print(F'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True ) ->int:
"""simple docstring"""
a_ = "imagenet-1k-id2label.json"
a_ = 1_000
a_ = (1, num_labels)
a_ = "huggingface/label-files"
a_ = num_labels
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
a_ = partial(UpperCAmelCase , num_labels=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase )
a_ = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(UpperCAmelCase , names_to_config[model_name] , UpperCAmelCase , UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert; it must be one of the supported resnet* architectures,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 243 |
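The core trick in the Tracker/ModuleTransfer pair above, reduced to a hedged sketch: record every leaf module that fires during a forward pass, then copy weights pairwise by position. The leaf test here is simplified relative to the Conv2d/BatchNorm2d special-casing in the original classes.

def trace_leaves(model, x):
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)
    for h in handles:
        h.remove()
    return [m for m in traced if len(m.state_dict()) > 0]  # keep parametrized modules

def transfer_weights(src, dest, x):
    for dest_m, src_m in zip(trace_leaves(dest, x), trace_leaves(src, x)):
        dest_m.load_state_dict(src_m.state_dict())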
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(UpperCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10)) | 243 | 1 |
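A quick check of the closed form h_n = n * (2n - 1) that the list comprehension above relies on:

hexagonals = [n * (2 * n - 1) for n in range(5)]
assert hexagonals == [0, 1, 6, 15, 28]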
'''simple docstring'''
import math
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( snake_case__ = 1_0001 ):
'''simple docstring'''
try:
A : Optional[Any] = int(snake_case__ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
A : list[int] = []
A : Optional[Any] = 2
while len(snake_case__ ) < nth:
if is_prime(snake_case__ ):
primes.append(snake_case__ )
num += 1
else:
num += 1
return primes[len(snake_case__ ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 311 |
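The 6k +/- 1 test above works because every integer is 6k + r with r in 0..5, and r in {0, 2, 3, 4} makes the number divisible by 2 or 3; only r in {1, 5} can be prime. A standalone sketch of the same primality test, using an incremental i*i bound instead of a precomputed square root:

def is_prime(number):
    if number < 2:
        return False
    if number < 4:
        return True  # 2 and 3
    if number % 2 == 0 or number % 3 == 0:
        return False
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i += 6
    return True

assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]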
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ )
A : Any = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ )
class A ( __snake_case ):
__magic_name__ = '''sigmoid'''
__magic_name__ = '''softmax'''
__magic_name__ = '''none'''
@add_end_docstrings(
__snake_case , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class A ( __snake_case ):
__magic_name__ = False
__magic_name__ = ClassificationFunction.NONE
def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Optional[Any] = tokenizer_kwargs
A : int = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
A : int = self.model.config.return_all_scores
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None:
A : Union[str, Any] = top_k
A : Dict = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , )
if return_all_scores:
A : Optional[int] = None
else:
A : Dict = 1
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
A : int = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
A : Any = '''top_k''' not in kwargs
if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
"""simple docstring"""
A : List[Any] = self.framework
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.model(**SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]:
"""simple docstring"""
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
A : Optional[int] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
A : Any = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
A : Optional[int] = self.model.config.function_to_apply
else:
A : Optional[int] = ClassificationFunction.NONE
A : Any = model_outputs['''logits'''][0]
A : List[Any] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
A : int = sigmoid(SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.SOFTMAX:
A : Any = softmax(SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.NONE:
A : int = outputs
else:
raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
A : int = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE )
]
if not _legacy:
dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE )
if top_k is not None:
A : Union[str, Any] = dict_scores[:top_k]
return dict_scores
| 311 | 1 |
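The function_to_apply selection above reduces to a small rule: multi-label problems (or a single-logit head) get a sigmoid, single-label multi-class problems get a softmax. A minimal sketch with illustrative inputs:

import numpy as np

def postprocess(logits, multi_label=False):
    if multi_label or logits.shape[-1] == 1:
        return 1.0 / (1.0 + np.exp(-logits))               # sigmoid
    shifted = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return shifted / shifted.sum(axis=-1, keepdims=True)   # numerically stable softmax

print(postprocess(np.array([[1.0, 2.0, 0.5]])))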
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase_ : Optional[int] = 16
lowerCamelCase_ : Dict = 32
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase = 16 ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('bert-base-cased' )
A_ : int = load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
A_ : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Optional[Any] = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : Tuple = 16
elif accelerator.mixed_precision != "no":
A_ : Optional[Any] = 8
else:
A_ : Optional[int] = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
A_ : Union[str, Any] = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
A_ : List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase_ : str = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1":
A_ : List[Any] = 2
# Initialize accelerator
A_ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[str] = config['lr']
A_ : List[str] = int(config['num_epochs'] )
A_ : Optional[int] = int(config['seed'] )
A_ : Optional[Any] = int(config['batch_size'] )
A_ : Optional[int] = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_UpperCAmelCase )
def inner_training_loop(_UpperCAmelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
A_ : Any = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
A_ , A_ : Tuple = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate scheduler
A_ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Tuple = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A_ : Optional[int] = model(**_UpperCAmelCase )
A_ : Tuple = outputs.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A_ : str = model(**_UpperCAmelCase )
A_ : int = outputs.logits.argmax(dim=-1 )
A_ , A_ : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
A_ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _UpperCAmelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
A_ : str = parser.parse_args()
A_ : Tuple = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main() | 286 |
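find_executable_batch_size is what makes the example OOM-proof. A hypothetical sketch of the retry-and-halve behavior (the real decorator detects CUDA/XLA out-of-memory errors specifically and frees memory between attempts; this simplification only matches on the error message):

import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2  # retry with half the batch size
            raise RuntimeError("No executable batch size found.")
        return wrapper
    return decorator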
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
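Usage sketch of the same structure with readable names, in the standard 1-indexed variant (the class above is 0-indexed with tree[0] held aside): point updates and prefix sums are both O(log n) via the lowest-set-bit step i & -i.

class Fenwick:
    def __init__(self, size):
        self.tree = [0] * (size + 1)

    def add(self, i, value):  # 1-based point update
        while i < len(self.tree):
            self.tree[i] += value
            i += i & -i

    def prefix(self, i):  # sum of elements 1..i
        total = 0
        while i > 0:
            total += self.tree[i]
            i -= i & -i
        return total

bit = Fenwick(8)
for idx, value in enumerate([3, 1, 4, 1, 5], start=1):
    bit.add(idx, value)
assert bit.prefix(3) == 8 and bit.prefix(5) == 14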
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ :Tuple = logging.get_logger(__name__)
A_ :Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ :Optional[int] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
A_ :Dict = {
'''gpt-neox-20b''': 2048,
}
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Tuple =VOCAB_FILES_NAMES
UpperCamelCase__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__=False , **lowerCamelCase__ , ):
"""simple docstring"""
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
__UpperCamelCase : Optional[Any] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space:
__UpperCamelCase : Tuple =getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) )
__UpperCamelCase : List[str] =add_prefix_space
__UpperCamelCase : Union[str, Any] =pre_tok_class(**lowerCamelCase__ )
__UpperCamelCase : Dict =add_prefix_space
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] )
if len(lowerCamelCase__ ) > self.model_max_length:
__UpperCamelCase : Any =input_ids[-self.model_max_length :]
return input_ids
| 358 |
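The conversation method at the end builds one flat id sequence: each turn is encoded and terminated with the EOS id, then the history is truncated from the left to fit the model window. A standalone sketch (the encode callable and the default ids are illustrative assumptions):

def build_conversation_ids(turns, encode, eos_token_id=0, model_max_length=2048):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])
    return input_ids[-model_max_length:]  # keep only the most recent tokens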
from __future__ import annotations
def A ( a_ ) -> int:
if not nums:
return 0
__UpperCamelCase : Any =nums[0]
__UpperCamelCase : Any =0
for num in nums[1:]:
__UpperCamelCase , __UpperCamelCase : List[Any] =(
max_excluding + num,
max(a_ ,a_ ),
)
return max(a_ ,a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 0 |
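The identifier rewriting in the sample above lost the two running maxima, so for reference here is the intended recurrence with readable names: at each number, the best sum that includes it is the previous excluding value plus the number, and the best that excludes it is the max of the two previous values.

def max_non_adjacent_sum(nums):
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)

assert max_non_adjacent_sum([1, 2, 4, 5, 9, 10]) == 17  # 2 + 5 + 10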
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
UpperCAmelCase_ : Union[str, Any] = np.where(input_ids != config.pad_token_id, 1, 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
if head_mask is None:
UpperCAmelCase_ : int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=16 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=32 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=0.02 , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : str = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : str = pad_token_id
UpperCAmelCase_ : str = bos_token_id
UpperCAmelCase_ : List[Any] = initializer_range
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase_ : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase_ : str = shift_tokens_right(lowercase_ , 1 , 2 )
UpperCAmelCase_ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
UpperCAmelCase_ : Optional[int] = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = 20
UpperCAmelCase_ : int = model_class_name(lowercase_ )
UpperCAmelCase_ : Optional[int] = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase_ , UpperCAmelCase_ : Any = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCAmelCase_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : int = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : Optional[Any] = model.decode(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = 20
UpperCAmelCase_ : Any = model_class_name(lowercase_ )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase_ : Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : int = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : Dict = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ )
UpperCAmelCase_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 99
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
UpperCAmelCase_ : Any = input_ids.shape[0]
UpperCAmelCase_ : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._get_config_and_data()
UpperCAmelCase_ : List[str] = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
UpperCAmelCase_ : Optional[int] = lm_model(input_ids=lowercase_ )
UpperCAmelCase_ : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
        UpperCAmelCase_ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        UpperCAmelCase_ : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
UpperCAmelCase_ : Tuple = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
UpperCAmelCase_ : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        UpperCAmelCase_ : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        UpperCAmelCase_ : Dict = shift_tokens_right(lowercase_ , 1 , 2 )
        UpperCAmelCase_ : Tuple = np.equal(lowercase_ , 1 ).astype(np.float32 ).sum()
        UpperCAmelCase_ : Optional[Any] = np.equal(lowercase_ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowercase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class A_ (lowercase__ ,unittest.TestCase ,lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE__ : List[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = FlaxBlenderbotSmallModelTester(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = model_class(lowercase_ )
@jax.jit
def encode_jitted(lowercase_ , lowercase_=None , **lowercase_ ):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : List[Any] = encode_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[Any] = encode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Optional[int] = model_class(lowercase_ )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCAmelCase_ : int = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ , lowercase_ , lowercase_ ):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : str = decode_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : List[Any] = decode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.assertIsNotNone(lowercase_ )
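# Note on the two cache checks above: each decodes in two passes -- all but the
# last token against a freshly initialized cache, then the final token reusing
# `past_key_values` -- and asserts the last-position logits agree with a single
# full decode to within 1e-3. This is the standard equivalence test for
# incremental (autoregressive) decoding in the Flax seq2seq models.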
| 61 |
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE:
def __init__( self: Dict ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def lowerCAmelCase_ ( self: List[str] ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row i of the circulant matrix is the second signal rotated by i
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
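# Quick cross-check (illustrative sketch, not part of the original file): for
# real signals, circular convolution equals ifft(fft(a) * fft(b)), so the
# circulant-matrix result above can be verified directly with the DFT.
if __name__ == "__main__":
    a = np.array([2, 1, 2, -1], dtype=float)
    b = np.array([1, 2, 3, 4], dtype=float)
    print(np.round(np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))), 2))
    # -> [10. 10.  6. 14.]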
| 307 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCamelCase , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
_lowercase : Optional[int] = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_lowercase : Optional[int] = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        _lowercase : Optional[int] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
_lowercase : int = black.format_str(_UpperCamelCase , mode=_UpperCamelCase )
_lowercase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(_UpperCamelCase , "w" , newline="\n" ) as f:
f.write(_UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCamelCase )
with open(_UpperCamelCase , "r" ) as f:
self.assertTrue(f.read() , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , _UpperCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , _UpperCamelCase ) , )
# Copy consistency with a really long name
_lowercase : Optional[Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , _UpperCamelCase , _UpperCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , _UpperCamelCase , overwrite_result=re.sub("DDPM" , "Test" , _UpperCamelCase ) , )
| 370 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 199 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_a : Optional[Any] = 'src/transformers'
_a : Any = 'docs/source/en/tasks'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ) -> int:
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_lowerCAmelCase : List[str] = f.readlines()
# Find the start prompt.
_lowerCAmelCase : Any = 0
while not lines[start_index].startswith(_lowerCamelCase ):
start_index += 1
start_index += 1
_lowerCAmelCase : List[Any] = start_index
while not lines[end_index].startswith(_lowerCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_a : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
_a : Optional[int] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_a : Tuple = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Optional[int]:
_lowerCAmelCase : int = TASK_GUIDE_TO_MODELS[task_guide]
_lowerCAmelCase : Any = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCamelCase ,set() )
_lowerCAmelCase : List[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any]=False ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = _find_text_in_file(
filename=os.path.join(_lowerCamelCase ,_lowerCamelCase ) ,start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" ,end_prompt="""<!--End of the generated tip-->""" ,)
_lowerCAmelCase : Any = get_model_list_for_task(_lowerCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_lowerCamelCase ,_lowerCamelCase ) ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
""" to fix this.""" )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_a : Dict = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 44 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : int = tau * frequency / samplerate
UpperCAmelCase_ : List[str] = sin(__lowerCamelCase )
UpperCAmelCase_ : int = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : int = (1 - _cos) / 2
UpperCAmelCase_ : Optional[Any] = 1 - _cos
UpperCAmelCase_ : int = 1 + alpha
UpperCAmelCase_ : Dict = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha
UpperCAmelCase_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Dict = tau * frequency / samplerate
UpperCAmelCase_ : Tuple = sin(__lowerCamelCase )
UpperCAmelCase_ : Any = cos(__lowerCamelCase )
UpperCAmelCase_ : List[str] = _sin / (2 * q_factor)
UpperCAmelCase_ : List[Any] = (1 + _cos) / 2
UpperCAmelCase_ : Optional[int] = -1 - _cos
UpperCAmelCase_ : Union[str, Any] = 1 + alpha
UpperCAmelCase_ : Optional[int] = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha
UpperCAmelCase_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Union[str, Any] = tau * frequency / samplerate
UpperCAmelCase_ : str = sin(__lowerCamelCase )
UpperCAmelCase_ : Tuple = cos(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : Any = _sin / 2
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Tuple = -ba
UpperCAmelCase_ : Optional[Any] = 1 + alpha
UpperCAmelCase_ : Dict = -2 * _cos
UpperCAmelCase_ : Optional[int] = 1 - alpha
UpperCAmelCase_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Any = tau * frequency / samplerate
UpperCAmelCase_ : Any = sin(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = cos(__lowerCamelCase )
UpperCAmelCase_ : str = _sin / (2 * q_factor)
UpperCAmelCase_ : List[str] = 1 - alpha
UpperCAmelCase_ : str = -2 * _cos
UpperCAmelCase_ : Any = 1 + alpha
UpperCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : Dict = tau * frequency / samplerate
UpperCAmelCase_ : Union[str, Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : int = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = _sin / (2 * q_factor)
UpperCAmelCase_ : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase_ : List[Any] = 1 + alpha * big_a
UpperCAmelCase_ : Tuple = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha * big_a
UpperCAmelCase_ : str = 1 + alpha / big_a
UpperCAmelCase_ : List[str] = -2 * _cos
UpperCAmelCase_ : List[str] = 1 - alpha / big_a
UpperCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : str = tau * frequency / samplerate
UpperCAmelCase_ : int = sin(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = cos(__lowerCamelCase )
UpperCAmelCase_ : Tuple = _sin / (2 * q_factor)
UpperCAmelCase_ : List[Any] = 10 ** (gain_db / 40)
UpperCAmelCase_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase_ : int = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase_ : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase_ : Optional[int] = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase_ : Dict = 2 * sqrt(__lowerCamelCase ) * alpha
UpperCAmelCase_ : List[str] = big_a * (pmc + aaa)
UpperCAmelCase_ : int = 2 * big_a * mpc
UpperCAmelCase_ : int = big_a * (pmc - aaa)
UpperCAmelCase_ : Dict = ppmc + aaa
UpperCAmelCase_ : Any = -2 * pmpc
UpperCAmelCase_ : List[str] = ppmc - aaa
UpperCAmelCase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : int = tau * frequency / samplerate
UpperCAmelCase_ : Optional[Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : Tuple = 10 ** (gain_db / 40)
UpperCAmelCase_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase_ : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase_ : List[Any] = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase_ : Any = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase_ : Dict = 2 * sqrt(__lowerCamelCase ) * alpha
UpperCAmelCase_ : Any = big_a * (ppmc + aaa)
UpperCAmelCase_ : Union[str, Any] = -2 * big_a * pmpc
UpperCAmelCase_ : Dict = big_a * (ppmc - aaa)
UpperCAmelCase_ : Optional[int] = pmc + aaa
UpperCAmelCase_ : Union[str, Any] = 2 * mpc
UpperCAmelCase_ : int = pmc - aaa
UpperCAmelCase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
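# Illustrative sketch (not part of the filter definitions above): once a
# biquad's coefficients [b0, b1, b2] / [a0, a1, a2] are known, samples are
# processed with the direct-form I difference equation. Names are hypothetical.
def _apply_biquad(samples: list[float], b: list[float], a: list[float]) -> list[float]:
    b0, b1, b2 = (coef / a[0] for coef in b)  # normalize everything by a0
    a1, a2 = a[1] / a[0], a[2] / a[0]
    x1 = x2 = y1 = y2 = 0.0  # previous input/output samples
    out = []
    for x0 in samples:
        y0 = b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2
        x2, x1 = x1, x0
        y2, y1 = y1, y0
        out.append(y0)
    return out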
| 61 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _A ( lowercase ):
"""simple docstring"""
a ={}
a =job['''started_at''']
a =job['''completed_at''']
a =date_parser.parse(lowercase )
a =date_parser.parse(lowercase )
a =round((end_datetime - start_datetime).total_seconds() / 60.0 )
a =start
a =end
a =duration_in_min
return job_info
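# e.g. started_at="2023-01-01T10:00:00Z", completed_at="2023-01-01T10:12:30Z"
# gives (end - start).total_seconds() / 60.0 == 12.5, i.e. a rounded duration
# of 12 minutes (Python's round() rounds halves to even). Values illustrative.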
def _A ( lowercase , lowercase=None ):
"""simple docstring"""
a =None
if token is not None:
a ={'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
a =f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
a =requests.get(lowercase , headers=lowercase ).json()
a ={}
try:
job_time.update({job['''name''']: extract_time_from_single_job(lowercase ) for job in result['''jobs''']} )
a =math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(lowercase ):
a =requests.get(url + f'''&page={i + 2}''' , headers=lowercase ).json()
job_time.update({job['''name''']: extract_time_from_single_job(lowercase ) for job in result['''jobs''']} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
lowerCamelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
lowerCamelCase_ : List[Any] = parser.parse_args()
lowerCamelCase_ : str = get_job_time(args.workflow_run_id)
lowerCamelCase_ : Tuple = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
    print(F'{k}: {v["duration"]}')
| 215 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : str = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ : int = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : Optional[Any] = {
"""google/rembert""": 2_5_6,
}
lowerCamelCase_ : Optional[Any] = """▁"""
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A=True , __A=False , __A="[CLS]" , __A="[SEP]" , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
a =do_lower_case
a =remove_space
a =keep_accents
a =vocab_file
a =False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__A ) )
return
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
        return (out_vocab_file,)
| 215 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__SCREAMING_SNAKE_CASE :List[Any] = None
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :List[Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = AlbertTokenizer
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_UpperCAmelCase = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
| 22 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318 | 0 |
"""simple docstring"""
from ....utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _lowercase):
def __init__( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : str=2048 ) -> Dict:
_UpperCamelCase = config.__dict__
_UpperCamelCase = modal_hidden_size
if num_labels:
_UpperCamelCase = num_labels
| 368 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( _lowercase):
snake_case__ = ['''image_processor''', '''tokenizer''']
snake_case__ = '''BlipImageProcessor'''
snake_case__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ) -> int:
_UpperCamelCase = False
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = self.image_processor
def __call__( self : Any , __UpperCamelCase : ImageInput = None , __UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Union[bool, str, TruncationStrategy] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[str, TensorType]] = None , **__UpperCamelCase : List[str] , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_UpperCamelCase = self.tokenizer
_UpperCamelCase = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
return text_encoding
# add pixel_values
_UpperCamelCase = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase )
if text is not None:
_UpperCamelCase = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
else:
_UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(__UpperCamelCase )
return encoding_image_processor
def _UpperCamelCase ( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : Optional[int] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ) -> str:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def _UpperCamelCase ( self : List[str] ) -> Dict:
_UpperCamelCase = self.tokenizer.model_input_names
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
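# Typical usage of the processor above (the real public name is BlipProcessor;
# the checkpoint shown is one published example, used here as an assumption):
#
#     from transformers import BlipProcessor
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photography of", return_tensors="pt")
#
# Text-only calls return plain token encodings; image(+text) calls add
# `pixel_values` produced by the image processor.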
| 54 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _a ( __a ):
__a : Optional[int] = """gptj"""
__a : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , lowercase : str=50_400 , lowercase : Optional[Any]=2_048 , lowercase : Dict=4_096 , lowercase : int=28 , lowercase : Optional[Any]=16 , lowercase : Union[str, Any]=64 , lowercase : Optional[int]=None , lowercase : List[str]="gelu_new" , lowercase : Optional[int]=0.0 , lowercase : Tuple=0.0 , lowercase : Dict=0.0 , lowercase : int=1E-5 , lowercase : List[str]=0.02 , lowercase : Optional[Any]=True , lowercase : Any=50_256 , lowercase : Tuple=50_256 , lowercase : Optional[int]=False , **lowercase : Dict , ):
'''simple docstring'''
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = n_inner
UpperCAmelCase = rotary_dim
UpperCAmelCase = activation_function
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = use_cache
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(
bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase )
class _a ( __a ):
def __init__( self : Any , lowercase : PretrainedConfig , lowercase : str = "default" , lowercase : List[PatchingSpec] = None , lowercase : bool = False , ):
'''simple docstring'''
super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase )
if not getattr(self._config , '''pad_token_id''' , lowercase ):
# TODO: how to do that better?
UpperCAmelCase = 0
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction='''inputs''' )
UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def A ( self : str ):
'''simple docstring'''
return self._config.n_layer
@property
def A ( self : List[Any] ):
'''simple docstring'''
return self._config.n_head
def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase = super(lowercase , self ).generate_dummy_inputs(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase = seqlen + 2
UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers )
]
UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
return ordered_inputs
@property
def A ( self : Dict ):
'''simple docstring'''
return 13
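# Usage sketch (assumes the real public names GPTJConfig / GPTJOnnxConfig --
# an assumption, since identifiers above are anonymized):
#
#     config = GPTJConfig(n_layer=4, n_head=4, n_embd=64)
#     onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#     print(onnx_config.inputs)
#     # OrderedDict with "input_ids", per-layer past key/value axes, and an
#     # "attention_mask" spanning past_sequence + sequence.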
| 34 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
A ={
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
A ={
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def snake_case_ (_a : List[str] ):
UpperCAmelCase = EfficientNetConfig()
UpperCAmelCase = CONFIG_MAP[model_name]['''hidden_dim''']
UpperCAmelCase = CONFIG_MAP[model_name]['''width_coef''']
UpperCAmelCase = CONFIG_MAP[model_name]['''depth_coef''']
UpperCAmelCase = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase = CONFIG_MAP[model_name]['''dropout_rate''']
UpperCAmelCase = CONFIG_MAP[model_name]['''dw_padding''']
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''imagenet-1k-id2label.json'''
UpperCAmelCase = 1_0_0_0
UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ():
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(_a , stream=_a ).raw )
return im
def snake_case_ (_a : str ):
UpperCAmelCase = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=_a , )
return preprocessor
def snake_case_ (_a : Optional[Any] ):
UpperCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
UpperCAmelCase = sorted(set(_a ) )
UpperCAmelCase = len(_a )
UpperCAmelCase = {b: str(_a ) for b, i in zip(_a , range(_a ) )}
UpperCAmelCase = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
UpperCAmelCase = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
UpperCAmelCase = {}
for item in rename_keys:
if item[0] in original_param_names:
UpperCAmelCase = '''efficientnet.''' + item[1]
UpperCAmelCase = '''classifier.weight'''
UpperCAmelCase = '''classifier.bias'''
return key_mapping
def snake_case_ (_a : Dict , _a : List[str] , _a : Dict ):
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCAmelCase = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCAmelCase = torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCAmelCase = torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCAmelCase = torch.from_numpy(np.transpose(_a ) )
else:
UpperCAmelCase = torch.from_numpy(_a )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_a )
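# Sanity check of the layout conversion above (illustrative): TF stores conv
# kernels as (kh, kw, in_ch, out_ch) while PyTorch expects
# (out_ch, in_ch, kh, kw), hence the (3, 2, 0, 1) permutation; depthwise
# kernels are (kh, kw, ch, 1) in TF and (ch, 1, kh, kw) in PyTorch, hence
# (2, 3, 0, 1).
#
#     tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)
#     torch.from_numpy(tf_kernel).permute(3, 2, 0, 1).shape
#     # -> torch.Size([32, 16, 3, 3])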
@torch.no_grad()
def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ):
UpperCAmelCase = model_classes[model_name](
include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , )
UpperCAmelCase = original_model.trainable_variables
UpperCAmelCase = original_model.non_trainable_variables
UpperCAmelCase = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCAmelCase = param.numpy()
UpperCAmelCase = list(tf_params.keys() )
# Load HuggingFace model
UpperCAmelCase = get_efficientnet_config(_a )
UpperCAmelCase = EfficientNetForImageClassification(_a ).eval()
UpperCAmelCase = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
UpperCAmelCase = rename_keys(_a )
replace_params(_a , _a , _a )
# Initialize preprocessor and preprocess input image
UpperCAmelCase = convert_image_processor(_a )
UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCAmelCase = hf_model(**_a )
UpperCAmelCase = outputs.logits.detach().numpy()
# Original model inference
UpperCAmelCase = False
UpperCAmelCase = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCAmelCase = image.img_to_array(_a )
UpperCAmelCase = np.expand_dims(_a , axis=0 )
UpperCAmelCase = original_model.predict(_a )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(_a ):
os.mkdir(_a )
# Save converted model and image processor
hf_model.save_pretrained(_a )
preprocessor.save_pretrained(_a )
if push_to_hub:
# Push model and image processor to hub
print(F"Pushing converted {model_name} to the hub..." )
UpperCAmelCase = F"efficientnet-{model_name}"
preprocessor.push_to_hub(_a )
hf_model.push_to_hub(_a )
if __name__ == "__main__":
A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
A =parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34 | 1 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Any = sorted(zip(lowercase__ , lowercase__ ) , key=lambda lowercase__ : x[0] / x[1] , reverse=lowercase__ )
_lowerCamelCase, _lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r]
_lowerCamelCase : List[str] = list(accumulate(lowercase__ ) )
_lowerCamelCase : List[Any] = bisect(lowercase__ , lowercase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
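# Readable restatement of the routine above (names chosen for clarity; the
# logic is identical): greedily take items by value/weight ratio and split
# the first item that no longer fits.
def frac_knapsack(vl, wt, w, n):
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # cumulative weights of the sorted items
    k = bisect(acc, w)          # number of items that fit whole
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0
# (items 1 and 2 taken whole, then 2/3 of item 3)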
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 12 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = models[model]
_lowerCamelCase : Optional[int] = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_lowerCamelCase : Any = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Dict = self.get_launcher(lowercase )
_lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
    def A_ ( self , distributed=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 12 | 1 |
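# Minimal sketch of how the launcher, script, and argument lists above compose
# into a single subprocess command; the paths and flags here are illustrative
# stand-ins, not the real test fixtures.
launcher = "deepspeed --num_nodes 1 --num_gpus 2".split()
script = ["examples/research_projects/wav2vec2/run_asr.py"]
args = "--output_dir /tmp/asr_out --num_train_epochs 1".split()
ds_args = "--deepspeed ds_config_wav2vec2_zero2.json".split()
cmd = launcher + script + args + ds_args
print(" ".join(cmd))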
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """simple docstring"""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden, cache, and scripts directories in-place so os.walk skips them
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    """simple docstring"""
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """simple docstring"""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """simple docstring"""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 313 |
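# Illustration of the heading helper defined above: level 0 opens a new "##"
# section, while deeper levels become bullet points indented by one space per
# level (matching the single-space indent written above).
print(repr(md_prefix(0)))  # '\n##'
print(repr(md_prefix(2)))  # '  *'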
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 313 | 1 |
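# A minimal usage sketch, assuming the transformers library exposes the
# IBertConfig configured above:
from transformers import IBertConfig

config = IBertConfig(quant_mode=True)
print(config.model_type)  # 'ibert'
print(config.quant_mode)  # True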
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """simple docstring"""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # check that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 371 |
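# Hypothetical invocation of the retrieval script above (the script filename
# is an assumption; the argument names match the parser):
# python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg --num_class_images 200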
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 308 | 0 |
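# Quick numeric check of the rotation helper defined above: turning the unit
# x-vector by 90 degrees should give (0, 1) up to floating point error.
import numpy

print(rotate(numpy.array([1, 0]), 90))  # approximately [0. 1.]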
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class _a ( unittest.TestCase ):
def __init__( self : Tuple , lowercase : Tuple , lowercase : int=7 , lowercase : List[Any]=3 , lowercase : Dict=30 , lowercase : Optional[Any]=400 , lowercase : Any=None , lowercase : str=True , lowercase : List[Any]=True , lowercase : List[str]=[0.5, 0.5, 0.5] , lowercase : Tuple=[0.5, 0.5, 0.5] , lowercase : List[Any]=10 , lowercase : Dict=False , lowercase : int=255 , lowercase : List[str]="shi-labs/oneformer_demo" , lowercase : Tuple="ade20k_panoptic.json" , lowercase : Optional[int]=10 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = {'''shortest_edge''': 32, '''longest_edge''': 1_333} if size is None else size
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = class_info_file
UpperCAmelCase = prepare_metadata(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = num_text
UpperCAmelCase = repo_path
# for the post_process_functions
UpperCAmelCase = 2
UpperCAmelCase = 10
UpperCAmelCase = 10
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = num_labels
UpperCAmelCase = do_reduce_labels
UpperCAmelCase = ignore_index
def A ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[Any]=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
UpperCAmelCase , UpperCAmelCase = image.size
else:
UpperCAmelCase , UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase = self.size['''shortest_edge''']
UpperCAmelCase = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase = self.size['''shortest_edge''']
UpperCAmelCase = self.size['''shortest_edge''']
else:
UpperCAmelCase = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase = max(SCREAMING_SNAKE_CASE__ , key=lambda lowercase : item[0] )[0]
UpperCAmelCase = max(SCREAMING_SNAKE_CASE__ , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
def A ( self : int ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _a ( UpperCAmelCase__ , unittest.TestCase ):
__a : List[Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__a : Dict = image_processing_class
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = OneFormerImageProcessorTester(self )
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''ignore_index''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''class_info_file''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''num_text''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''repo_path''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''metadata''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_reduce_labels''' ) )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
UpperCAmelCase = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = image_processor(
SCREAMING_SNAKE_CASE__ , ['''semantic'''] * len(SCREAMING_SNAKE_CASE__ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = image_processor(
SCREAMING_SNAKE_CASE__ , ['''semantic'''] * len(SCREAMING_SNAKE_CASE__ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = image_processor(
SCREAMING_SNAKE_CASE__ , ['''semantic'''] * len(SCREAMING_SNAKE_CASE__ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : str , lowercase : List[str]=False , lowercase : List[str]=False , lowercase : Dict="np" ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase = self.image_processing_tester.num_labels
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
if with_segmentation_maps:
UpperCAmelCase = num_labels
if is_instance_map:
UpperCAmelCase = list(range(SCREAMING_SNAKE_CASE__ ) ) * 2
UpperCAmelCase = dict(enumerate(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase = [Image.fromarray(SCREAMING_SNAKE_CASE__ ) for annotation in annotations]
UpperCAmelCase = image_processor(
SCREAMING_SNAKE_CASE__ , ['''semantic'''] * len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , instance_id_to_semantic_id=SCREAMING_SNAKE_CASE__ , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ , )
return inputs
def A ( self : str ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
def common(lowercase : Optional[int]=False , lowercase : List[str]=None ):
UpperCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=SCREAMING_SNAKE_CASE__ , is_instance_map=SCREAMING_SNAKE_CASE__ , segmentation_type=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase = inputs['''mask_labels''']
UpperCAmelCase = inputs['''class_labels''']
UpperCAmelCase = inputs['''pixel_values''']
UpperCAmelCase = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=SCREAMING_SNAKE_CASE__ )
common(is_instance_map=SCREAMING_SNAKE_CASE__ , segmentation_type='''pil''' )
common(is_instance_map=SCREAMING_SNAKE_CASE__ , segmentation_type='''pil''' )
def A ( self : str ):
'''simple docstring'''
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # with the next line: one run of ones at flat pixels 20..64
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1  # a second, separate run, so the RLE holds two (start, length) pairs
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A ( self : Any ):
'''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase = image_processor.post_process_instance_segmentation(SCREAMING_SNAKE_CASE__ , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase = image_processor.post_process_panoptic_segmentation(SCREAMING_SNAKE_CASE__ , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
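# Standalone sketch of the run-length encoding convention asserted in the RLE
# test above (assumption: alternating 1-indexed run starts and run lengths
# over the row-major flattened mask, mirroring binary_mask_to_rle).
import numpy as np

def rle_sketch(mask):
    # Pad with zeros, find change points, then turn every second entry from a
    # run end into a run length.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return list(runs)

demo_mask = np.zeros((20, 50))
demo_mask[0, 20:] = 1
print(rle_sketch(demo_mask)[:2])  # [21, 30]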
| 34 | '''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCamelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
re.sub("""<n>""" , """""" , snake_case_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(snake_case_ ) )
| 229 | 0 |
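# Usage sketch for the sentence splitter above, assuming nltk's punkt data
# was downloaded successfully:
import nltk

text = "Rouge scoring is sentence based. Each sentence gets its own line."
print("\n".join(nltk.sent_tokenize(text)))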
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : int ):
snake_case_ : Tuple = tempfile.mkdtemp()
snake_case_ : Optional[int] = BlipImageProcessor()
snake_case_ : Any = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
snake_case_ : Union[str, Any] = BlipaProcessor(_lowercase , _lowercase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self : Dict , **lowercase_ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).tokenizer
def _snake_case ( self : int , **lowercase_ : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def _snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Any ):
snake_case_ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
snake_case_ : Optional[Any] = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : str ):
snake_case_ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case_ : List[Any] = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
snake_case_ : str = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def _snake_case ( self : Optional[int] ):
snake_case_ : int = self.get_image_processor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : List[str] = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : List[str] = self.prepare_image_inputs()
snake_case_ : Any = image_processor(_lowercase , return_tensors='''np''' )
snake_case_ : int = processor(images=_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self : List[str] ):
snake_case_ : Any = self.get_image_processor()
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Union[str, Any] = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : Optional[int] = '''lower newer'''
snake_case_ : Any = processor(text=_lowercase )
snake_case_ : Dict = tokenizer(_lowercase , return_token_type_ids=_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : str ):
snake_case_ : Any = self.get_image_processor()
snake_case_ : List[Any] = self.get_tokenizer()
snake_case_ : str = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : int = '''lower newer'''
snake_case_ : List[str] = self.prepare_image_inputs()
snake_case_ : Optional[int] = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _snake_case ( self : int ):
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : Optional[Any] = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : int = processor.batch_decode(_lowercase )
snake_case_ : Any = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Any = self.get_image_processor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : Any = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : List[Any] = '''lower newer'''
snake_case_ : Optional[int] = self.prepare_image_inputs()
snake_case_ : Union[str, Any] = processor(text=_lowercase , images=_lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 370 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f'{base} to the power of {exponent} is {result}')
| 155 | 0 |
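# An equivalent iterative sketch that folds the negative-exponent fix-up into
# the function itself:
def power_iterative(base: float, exponent: int) -> float:
    result = 1.0
    for _ in range(abs(exponent)):
        result *= base
    return 1 / result if exponent < 0 else result

print(power_iterative(2, 10))  # 1024.0
print(power_iterative(2, -3))  # 0.125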
def matching_min_vertex_cover(graph: dict) -> set:
    '''simple docstring'''
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    '''simple docstring'''
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 138 |
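# Usage with the example graph from the comment above. The cover returned by
# the greedy matching heuristic may vary with set iteration order, but every
# edge is guaranteed to touch at least one chosen vertex.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
assert all(u in cover or v in cover for u in graph for v in graph[u])
print(cover)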
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # track whether the result must be converted back to torch
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost colinear, fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size["""shortest_edge"""]
)
SCREAMING_SNAKE_CASE__ : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def _a ( self , _a = "auto" ) -> Dict:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _a ( self ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
set_requires_grad(self.vae , _a )
def _a ( self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.vae , _a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
set_requires_grad(self.unet , _a )
def _a ( self ) -> int:
"""simple docstring"""
set_requires_grad(self.unet , _a )
def _a ( self , _a , _a , _a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = min(int(num_inference_steps * strength ) , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _a ( self , _a , _a , _a , _a , _a , _a=None ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
SCREAMING_SNAKE_CASE__ : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat(_a , dim=0 )
else:
SCREAMING_SNAKE_CASE__ : int = self.vae.encode(_a ).latent_dist.sample(_a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0.18_215 * init_latents
SCREAMING_SNAKE_CASE__ : List[str] = init_latents.repeat_interleave(_a , dim=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
SCREAMING_SNAKE_CASE__ : Any = self.scheduler.add_noise(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = init_latents
return latents
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE__ : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
SCREAMING_SNAKE_CASE__ : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def _a ( self , _a , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.preprocess(_a )
SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
SCREAMING_SNAKE_CASE__ : Any = self.clip_model.get_image_features(_a )
SCREAMING_SNAKE_CASE__ : int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _a ( self , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = latents.detach().requires_grad_()
SCREAMING_SNAKE_CASE__ : str = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : Any = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE__ : List[Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : Optional[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE__ : List[str] = torch.sqrt(_a )
SCREAMING_SNAKE_CASE__ : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.sigmas[index]
SCREAMING_SNAKE_CASE__ : Dict = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 / 0.18_215 * sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vae.decode(_a ).sample
SCREAMING_SNAKE_CASE__ : Any = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Any = transforms.Resize(self.feature_extractor_size )(_a )
SCREAMING_SNAKE_CASE__ : Dict = self.normalize(_a ).to(latents.dtype )
SCREAMING_SNAKE_CASE__ : Tuple = self.clip_model.get_image_features(_a )
SCREAMING_SNAKE_CASE__ : int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
SCREAMING_SNAKE_CASE__ : Optional[Any] = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
SCREAMING_SNAKE_CASE__ : Any = latents.detach() + grads * (sigma**2)
SCREAMING_SNAKE_CASE__ : Optional[int] = noise_pred_original
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ) -> int:
"""simple docstring"""
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [generator] + [None] * (batch_size - 1)
SCREAMING_SNAKE_CASE__ : List[Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
SCREAMING_SNAKE_CASE__ : Optional[int] = [x[0] for x in coca_is_none if x[1]]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """, """.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
SCREAMING_SNAKE_CASE__ : Any = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_description(_a )
# get prompt text embeddings for content and style
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : Any = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ : int = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Tuple = {}
if accepts_offset:
SCREAMING_SNAKE_CASE__ : List[str] = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_timesteps(_a , _a , self.device )
SCREAMING_SNAKE_CASE__ : List[str] = timesteps[:1].repeat(_a )
# Preprocess image
SCREAMING_SNAKE_CASE__ : str = preprocess(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = preprocess(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Any = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE__ : List[str] = self.get_clip_image_embeddings(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_clip_image_embeddings(_a , _a )
SCREAMING_SNAKE_CASE__ : Dict = slerp(
_a , _a , _a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = content_text_input.input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : str = self.tokenizer([""""""] , padding="""max_length""" , max_length=_a , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ : Tuple = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE__ : Dict = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ : Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
SCREAMING_SNAKE_CASE__ : List[str] = torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : str = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ : Optional[Any] = eta
# check if the scheduler accepts generator
SCREAMING_SNAKE_CASE__ : int = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
SCREAMING_SNAKE_CASE__ : Optional[Any] = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : List[Any] = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE__ : List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : Any = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ : List[Any] = 1 / 0.18_215 * latents
SCREAMING_SNAKE_CASE__ : int = self.vae.decode(_a ).sample
SCREAMING_SNAKE_CASE__ : str = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : int = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 132 | 0 |
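# Numeric check of the spherical interpolation (slerp) math used above:
# halfway between two orthogonal unit vectors it should bisect the angle.
import numpy as np

va = np.array([1.0, 0.0])
vb = np.array([0.0, 1.0])
t = 0.5
theta = np.arccos(np.sum(va * vb / (np.linalg.norm(va) * np.linalg.norm(vb))))
out = (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)
print(out)  # approximately [0.7071 0.7071]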
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 311 |
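# Quick numeric check of the cos^2 relationship implemented above: a 100-unit
# beam through a polariser at 60 degrees transmits one quarter of it.
import math

print(100.0 * math.cos(math.radians(60)) ** 2)  # ~25.0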
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    '''simple docstring'''
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    '''simple docstring'''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """For a given `task_guide`, check that the model list in the generated tip is up to date and maybe fix it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
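
# Typical invocations (a sketch, assuming the repo layout described at the top):
#
#   python utils/check_task_guides.py                      # check only; raises on a stale list
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite stale lists in place
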
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an FNet model. Instantiating it with the
    defaults yields a configuration similar to that of google/fnet-base."""

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072,
        hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4,
        initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
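

# --- illustrative usage (added; not part of the original module) ---
# A minimal sketch: build the config with a couple of arbitrary override values
# and read them back.
if __name__ == "__main__":
    demo_config = FNetConfig(num_hidden_layers=6, tpu_short_seq_length=256)  # example values only
    print(demo_config.model_type, demo_config.num_hidden_layers)  # -> fnet 6

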
"""Project Euler problem 12: find the value of the first triangle number to have more than 500 divisors."""


def triangle_number_generator():
    """Generate the triangle numbers n * (n + 1) / 2 for n = 1, 2, 3, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of `n` from its prime factorization: if n = p1^a1 * ... * pk^ak,
    it has (a1 + 1) * ... * (ak + 1) divisors."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # A leftover prime factor has exponent 1 and contributes a factor of 2.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
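

# Quick worked check (added for illustration): 28 = 2^2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors, and 28 is itself a triangle number.
assert count_divisors(28) == 6
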
if __name__ == "__main__":
print(solution())
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
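
# Note (added for illustration): after the sys.modules swap above, a downstream
#   from transformers import SwiftFormerModel
# resolves lazily: the torch-backed module is only imported on first attribute
# access, while the TYPE_CHECKING branch keeps static type checkers happy.
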
"""Utility that checks and fixes the order of the `_import_structure` entries in Transformers `__init__.py` files."""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indent (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at indentation level `indent_level`; if given, `start_prompt` and
    `end_prompt` delimit the region that gets split."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
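
# Quick illustration (added): two top-level statements become two blocks, with the
# indented body attached to its `if`.
assert split_code_in_indented_blocks("a = 1\nif a:\n    b = 2") == ["a = 1", "if a:\n    b = 2"]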


def ignore_underscore(key):
    """Wrap a key function so that sorting is case-insensitive and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: constants first, classes second,
    functions last. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
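
# Quick illustration (added): constants, then classes, then functions, each
# alphabetically with underscores ignored.
assert sort_objects(["load_tool", "Agent", "CONFIG_NAME"]) == ["CONFIG_NAME", "Agent", "load_tool"]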


def sort_objects_in_import(import_statement: str) -> str:
    """Sort the objects listed in a single import statement of an init."""

    # This inner function sorts the imports between the brackets of a one-line list.
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
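
# Quick illustration (added): a single-line bracketed import gets its keys reordered
# following the same constants/classes/functions rule.
assert sort_objects_in_import('"tool": ["load_x", "Agent", "CONST"],') == '"tool": ["CONST", "Agent", "load_x"],'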


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of `file`; if `check_only=True`, just report whether a change is needed."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the Transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
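
# Typical invocations (a sketch; run from the repo root):
#
#   python utils/custom_init_isort.py --check_only   # raise if any __init__.py needs re-sorting
#   python utils/custom_init_isort.py                # rewrite unsorted inits in place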