| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82–53.2k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Integer (x, y) pixel coordinates for every pixel in the image, shape [height * width, 2]."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
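
A quick sketch of how the pan cameras above might be exercised; the resolution is an arbitrary choice for illustration, and the shapes follow from the definitions in the code.

```python
import torch

cameras = create_pan_cameras(64)  # 20 poses circling the origin, each 64x64
rays = cameras.camera_rays
print(rays.shape)  # torch.Size([1, 81920, 2, 3]): 20 views x 64 x 64 pixels, (origin, direction) per pixel
directions = rays[:, :, 1]
print(directions.norm(dim=-1).mean().item())  # ~1.0, since directions are normalized
```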
---
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
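
A hypothetical driver for the PABEE head above, assuming a stock `bert-base-uncased` checkpoint; the per-layer classification heads start randomly initialized, so the logits are only meaningful after fine-tuning.

```python
import torch
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
model.eval()
model.bert.set_patience(3)  # exit once 3 consecutive layers agree on the prediction

inputs = tokenizer("PABEE exits early once predictions stabilize.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs)[0]
model.bert.log_stats()  # reports average inference layers and the implied speed-up
```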
---
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """
    Solve an ODE with Euler's Modified Method (the Heun predictor-corrector scheme),
    returning the solution at every step.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: a plain explicit-Euler estimate of y at x + h.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the step.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
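
A minimal check of the integrator on y' = y, whose exact solution is e^x; the step size and interval are arbitrary choices for illustration.

```python
import numpy as np

y = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.1, x_end=1.0)
print(y[-1], np.e)  # ~2.7141 vs 2.71828...: the scheme is second-order accurate in the step size
```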
---
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of 16 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of the message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
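
A sanity check against the standard library, in the spirit of the doctests this file carries upstream.

```python
import hashlib

assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
msg = b"The quick brown fox jumps over the lazy dog"
assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")
```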
---
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
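
Outside the test harness, the scheduler's denoising loop looks roughly like the sketch below; `model` is a stand-in for a trained noise-prediction network, and the tensor shape is arbitrary.

```python
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)

model = lambda sample, t: torch.zeros_like(sample)  # placeholder for a real UNet
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```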
---
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
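
In practice this formatter is usually reached through `Dataset.set_format("torch")` rather than instantiated directly; a small sketch:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds.set_format("torch")
print(type(ds[0]["x"]), ds[0]["x"].dtype)  # torch.Tensor, torch.float32 per the dtype mapping above
```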
---
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape basic product data (title, link, price, rating, MRP, discount)
    for an Amazon.in search and return it as a Pandas dataframe.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
---
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
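
A minimal instantiation sketch; the tiny dimensions are arbitrary and the weights are random, just to show the config wiring.

```python
from transformers import RoCBertConfig, RoCBertModel

config = RoCBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2, intermediate_size=256)
model = RoCBertModel(config)
print(model.config.model_type)  # "roc_bert"
```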
---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
---
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCamelCase :
@staticmethod
def _lowerCAmelCase ( *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :Optional[int] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
lowerCAmelCase__ : Optional[Any] = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = vqa_pipeline(UpperCamelCase , top_k=1 )
self.assertEqual(
UpperCamelCase , [
[{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}],
[{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}],
] , )
@require_torch
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
lowerCAmelCase__ : Dict = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowerCAmelCase__ : int = """How many cats are there?"""
lowerCAmelCase__ : List[Any] = vqa_pipeline(image=UpperCamelCase , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
UpperCamelCase , [{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}, {"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}] )
lowerCAmelCase__ : List[str] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
UpperCamelCase , [{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}, {"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}] )
@slow
@require_torch
def _lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Any = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
lowerCAmelCase__ : Any = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowerCAmelCase__ : List[str] = """How many cats are there?"""
lowerCAmelCase__ : Any = vqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
lowerCAmelCase__ : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
lowerCAmelCase__ : Optional[int] = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
---
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_A = direct_transformers_import(PATH_TO_TRANSFORMERS)
_A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_A = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_A = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowercase_ ( __UpperCAmelCase ) -> str:
lowerCAmelCase__ : Union[str, Any] = None
# source code of `config_class`
lowerCAmelCase__ : List[Any] = inspect.getsource(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = _re_checkpoint.findall(__UpperCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("""/""" ):
lowerCAmelCase__ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase__ : Dict = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase__ : Optional[Any] = ckpt_name
break
return checkpoint
def lowercase_ ( ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase__ : Dict = get_checkpoint_from_config_class(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowerCAmelCase__ : int = """\n""".join(sorted(__UpperCAmelCase ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
---
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distance from `src` to every vertex, relaxing all edges |V| - 1 times."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
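
A programmatic example that bypasses the interactive prompts; the graph is a small hand-picked instance.

```python
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
    {"src": 1, "dst": 3, "weight": 1},
]
print(bellman_ford(example_graph, vertex_count=4, edge_count=4, src=0))  # [0.0, 3.0, 1.0, 4.0]
```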
---
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original BLIP weights into the transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
---
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
---
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        \"raw_values\" : Returns a full set of errors in case of multioutput input.
        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
    mse : mean squared error.
Examples:
    >>> mse_metric = datasets.load_metric(\"mse\")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}
    If you're using multi-dimensional lists, then set the config as follows :
    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
---
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc: the fraction (angle / 360) of the circumference."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
---
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowercase__ = 10
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int ):
for i in range(_lowerCamelCase , _lowerCamelCase ):
if array[i] == target:
return i
return -1
def __magic_name__ ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
__a : Optional[int] = 0
__a : Tuple = len(_lowerCamelCase )
while left <= right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__a : Tuple = (left + right) // 3 + 1
__a : List[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__a : Optional[Any] = one_third - 1
elif array[two_third] < target:
__a : List[Any] = two_third + 1
else:
__a : Any = one_third + 1
__a : Union[str, Any] = two_third - 1
else:
return -1
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int ):
if left < right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__a : Any = (left + right) // 3 + 1
__a : Optional[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_lowerCamelCase , one_third - 1 , _lowerCamelCase , _lowerCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _lowerCamelCase , _lowerCamelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = input("Enter numbers separated by comma:\n").strip()
lowercase__ = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
lowercase__ = int(input("Enter the number to be found in the list:\n").strip())
lowercase__ = ite_ternary_search(collection, target)
lowercase__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'Iterative search: {target} found at positions: {resulta}')
print(f'Recursive search: {target} found at positions: {resulta}')
else:
print("Not found")
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
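# A hedged usage sketch: other modules would call the helper as, e.g.,
#     dep_version_check("tqdm")
# which raises if the installed version does not satisfy the pin recorded in
# dependency_versions_table.py.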
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
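# A hedged usage sketch: a test can request the fixture and construct the dummy
# builder from the generated directory, e.g.
#     def test_dummy_dataset(dataset_loading_script_dir):
#         builder = datasets.load_dataset_builder(dataset_loading_script_dir)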
from __future__ import annotations


def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Faster variant that only computes the distinct half of each row and mirrors it."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
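    # A small hedged check of the expected output: both generators agree, e.g.
    # generate_pascal_triangle(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]].
    assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]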
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
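# A hedged round-trip sketch: the `reverse_*` helpers invert the [0, 2, 1, 3]
# channel regrouping applied by the `correct_unfold_*` helpers (that permutation
# is its own inverse), e.g.
#     x = torch.arange(8.0)  # a norm vector with in_channel = 8
#     reverse_correct_unfold_norm_order(correct_unfold_norm_order(x))  # == x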
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, skipping surrounding empty lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
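# A hedged worked example: for a 768x768 request with the default scale_factor=8,
# 768 // 8**2 = 12 with no remainder, so get_new_h_w(768, 768) -> (96, 96);
# the latent grid ends up at 1/8th of the requested pixel resolution.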
class KandinskyPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky.
    """

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Compute the present value of a series of cash flows at the given discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
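    # A hedged worked example: an initial outlay of -100 followed by +60 in each
    # of the next two years, discounted at 10% per period:
    #   -100 + 60 / 1.1 + 60 / 1.21 = 4.13 (rounded to 2 digits)
    print(present_value(0.10, [-100, 60, 60]))  # 4.13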
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
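# A hedged worked example of the sentinel-id arithmetic above: with a 32,000-piece
# SentencePiece model and the default extra_ids=100, vocab_size is 32,100, so
# _convert_token_to_id("<extra_id_0>") returns 32100 - 0 - 1 = 32099; the sentinel
# tokens occupy the top of the vocabulary in reverse order.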
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
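

# A minimal follow-up sketch (not part of the test above): DPT predicts depth at a
# fixed resolution, so callers typically resample it back to the input image size.
# `predicted_depth` is assumed to be a (batch, height, width) tensor and `image` a
# PIL image; this helper is illustrative, not part of the transformers API.
def upsample_depth_to_image(predicted_depth, image):
    # PIL reports size as (width, height); interpolate expects (height, width).
    return torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),  # add a channel dim: (batch, 1, h, w)
        size=image.size[::-1],
        mode="bicubic",
        align_corners=False,
    ).squeeze(1)  # back to (batch, height, width)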
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
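

# A small standalone sketch of the query-padding rule used in __call__ above:
# every sample in a batch is padded with " " queries up to the longest sample,
# so the tokenizer sees rectangular input. Purely illustrative; this helper is
# not part of the processor's API.
def pad_text_queries(text_batches):
    max_num_queries = max(len(t) for t in text_batches)
    return [t + [" "] * (max_num_queries - len(t)) for t in text_batches]


# pad_text_queries([["a cat"], ["a dog", "a bird"]])
# -> [["a cat", " "], ["a dog", "a bird"]]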
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
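

# A tiny cross-check sketch (illustrative, not from the original script): both
# implementations should agree with itertools and produce n! permutations.
def _check_permutations():
    from itertools import permutations as it_permutations

    assert sorted(permute2([1, 2, 3])) == sorted(map(list, it_permutations([1, 2, 3])))
    assert len(permute2([1, 2, 3, 4])) == 24  # 4! = 24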
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case: Dict = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case: str = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__snake_case: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
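

# A small variant sketch (not part of the original script): list.pop(0) is O(n),
# so a collections.deque gives the textbook O(V + E) behaviour. Same traversal,
# different queue type.
def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    from collections import deque

    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()  # O(1) instead of list.pop(0)'s O(n)
        node = path[-1]
        if node == goal:
            return path
        for neighbour in graph[node]:
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []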
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
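

# A minimal sketch (illustrative, not the transformers implementation) of how the
# toy merge rules in setUp apply to a word: repeatedly merge the highest-priority
# adjacent pair until no rule matches. "\u0120" marks a leading space. Real BPE
# merges every occurrence of the best pair per step; this simplified version
# merges the first occurrence, which suffices for short words.
def toy_bpe(word, merges):
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = ["".join(best)]
    return symbols


# toy_bpe("\u0120lower", ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""])
# -> ["\u0120low", "er"], matching the expected tokens in test_full_tokenizer.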
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
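
# Quick sanity checks (the power=1000 case is Project Euler problem 16, whose
# published answer is 1366; 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26).
assert solution(15) == 26
assert solution(1000) == 1366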
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
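
# Example invocation sketch (the script name and all paths below are
# placeholders, not taken from this file):
#
#   python convert_albert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --albert_config_file /path/to/albert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin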
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[Any] ={
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any =[
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
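

# A short sketch of how such counts are typically turned into smoothed masking
# probabilities for MLM (the exponent is a free parameter chosen here for
# illustration, not taken from this script): rarer tokens get proportionally
# higher masking probability.
def smoothed_mask_probs(counts, smoothing=0.7):
    import numpy as np

    freqs = np.maximum(np.array(counts, dtype=np.float64), 1.0)
    probs = freqs**-smoothing  # down-weight frequent tokens
    return probs / probs.sum()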
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCamelCase , "Please write your own downloading logic.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]) -> Tuple:
'''simple docstring'''
if not gfile.Exists(_lowerCamelCase):
gfile.MakeDirs(_lowerCamelCase)
__UpperCamelCase : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase)
if not gfile.Exists(_lowerCamelCase):
urllib.request.urlretrieve(_lowerCamelCase , _lowerCamelCase) # noqa: S310
with gfile.GFile(_lowerCamelCase) as f:
__UpperCamelCase : Any = f.size()
print("Successfully downloaded" , _lowerCamelCase , _lowerCamelCase , "bytes.")
return filepath
@deprecated(
_lowerCamelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : List[Any]=False , _lowerCamelCase : str=False , _lowerCamelCase : List[str]=dtypes.floataa , _lowerCamelCase : Any=True , _lowerCamelCase : Union[str, Any]=5_000 , _lowerCamelCase : str=None , _lowerCamelCase : Optional[int]=DEFAULT_SOURCE_URL , ) -> List[Any]:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_lowerCamelCase , one_hot=_lowerCamelCase , dtype=_lowerCamelCase , seed=_lowerCamelCase)
__UpperCamelCase : Optional[int] = fake()
__UpperCamelCase : Tuple = fake()
__UpperCamelCase : List[str] = fake()
return _Datasets(train=_lowerCamelCase , validation=_lowerCamelCase , test=_lowerCamelCase)
if not source_url: # empty string check
__UpperCamelCase : str = DEFAULT_SOURCE_URL
__UpperCamelCase : Optional[int] = "train-images-idx3-ubyte.gz"
__UpperCamelCase : Dict = "train-labels-idx1-ubyte.gz"
__UpperCamelCase : List[str] = "t10k-images-idx3-ubyte.gz"
__UpperCamelCase : List[str] = "t10k-labels-idx1-ubyte.gz"
__UpperCamelCase : Optional[int] = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + train_images_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : int = _extract_images(_lowerCamelCase)
__UpperCamelCase : Optional[Any] = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + train_labels_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : int = _extract_labels(_lowerCamelCase , one_hot=_lowerCamelCase)
__UpperCamelCase : int = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + test_images_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : Optional[int] = _extract_images(_lowerCamelCase)
__UpperCamelCase : str = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + test_labels_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : List[str] = _extract_labels(_lowerCamelCase , one_hot=_lowerCamelCase)
if not 0 <= validation_size <= len(_lowerCamelCase):
__UpperCamelCase : str = (
"Validation size should be between 0 and "
F'{len(_lowerCamelCase)}. Received: {validation_size}.'
)
raise ValueError(_lowerCamelCase)
__UpperCamelCase : Any = train_images[:validation_size]
__UpperCamelCase : Optional[Any] = train_labels[:validation_size]
__UpperCamelCase : Optional[int] = train_images[validation_size:]
__UpperCamelCase : Tuple = train_labels[validation_size:]
__UpperCamelCase : List[str] = {"dtype": dtype, "reshape": reshape, "seed": seed}
__UpperCamelCase : Union[str, Any] = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
__UpperCamelCase : str = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
__UpperCamelCase : Optional[Any] = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
return _Datasets(train=_lowerCamelCase , validation=_lowerCamelCase , test=_lowerCamelCase)
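

# Minimal usage sketch for the deprecated loader above (the directory path is a
# placeholder, shown only for completeness):
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_images, batch_labels = mnist.train.next_batch(32)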
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
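

# A minimal greedy fractional-knapsack sketch of the calc_profit function under
# test (illustrative only; the real implementation lives in
# knapsack/greedy_knapsack.py): take items in decreasing profit/weight ratio,
# splitting the last item if it does not fit.
def calc_profit_sketch(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)
        total += p * (take / w)  # fractional credit for a partial item
        capacity -= take
        if capacity == 0:
            break
    return total


# calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) -> 210.0,
# matching test_sorted above (all items fit, so the full profit is collected).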
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias"""))
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
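# Illustrative invocation (a sketch, not part of the original script; the script
# filename and output folder are assumptions, the URL is the default checkpoint above):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-converted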
| 647
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX tokenizer, backed by the HuggingFace `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # make sure the backend pre-tokenizer honours `add_prefix_space`
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate each conversation turn, appending the EOS token after every turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
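# Minimal usage sketch (not part of the original module; assumes network access to the
# Hugging Face Hub and uses the checkpoint name from the map above):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids)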
| 457
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=F'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
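# Illustrative test invocation (a sketch, assuming the standard transformers repository
# layout; @slow tests are gated behind the RUN_SLOW environment variable):
#   pytest tests/models/beit/test_modeling_beit.py -v
#   RUN_SLOW=1 pytest tests/models/beit/test_modeling_beit.py -k "IntegrationTest" -v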
| 457
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
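# Illustrative invocation (a sketch): the fast tests above run on CPU via
# CPUExecutionProvider, while the integration tests are gated behind @nightly,
# @require_onnxruntime and @require_torch_gpu. In diffusers, nightly gating is
# typically controlled by the RUN_NIGHTLY environment variable, e.g.:
#   RUN_NIGHTLY=1 pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py -v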
| 549
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"""
    )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
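    # Each XNLI example is a premise/hypothesis pair; the call above encodes the two
    # texts as one sequence pair, so for a BERT-style tokenizer the result is roughly
    # "[CLS] premise [SEP] hypothesis [SEP]" (the exact special tokens depend on the
    # tokenizer in use).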
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""")


if __name__ == "__main__":
    main()
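# Illustrative launch command (a sketch; model and language choices are examples only):
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli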
| 549
| 1
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Run simulated annealing from a starting state: repeatedly pick a random
    neighbor, move to it if it improves the score, and otherwise move to it with a
    probability that decays as the temperature decreases."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
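# Note on the acceptance rule above: a move that worsens the score (change < 0) is
# still accepted with probability e^(change / current_temp). For example, with
# change = -2 the acceptance probability is e^(-2/100), roughly 0.98, at
# current_temp = 100, but only e^(-2/1), roughly 0.14, once the temperature has
# cooled to 1; random exploration therefore fades as the schedule lowers the
# temperature.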
if __name__ == "__main__":
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
__lowerCamelCase : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : int = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f"""{local_min.score()}"""
)
__lowerCamelCase : str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f"""{local_min.score()}"""
)
| 703
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__lowerCamelCase : Any = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 418
| 0
|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 191
|
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
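
    # Illustrative check (an addition, not in the original file): equation(x) = 10 - x*x
    # has roots at +/- sqrt(10) ~ +/- 3.1623; the loop halves the bracket until it is
    # narrower than 0.01, so the returned midpoint lands within 0.01 of the root.
    print(abs(bisection(0, 6) - 10**0.5) < 0.01)  # expected: True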
| 453
| 0
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list, nums2: list) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [float(x) for x in input("Enter the elements of first array: ").split()]
lowerCAmelCase__ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
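
    # Quick illustrative examples (an addition, not in the original file): an odd
    # combined length yields the single middle element, an even length the mean of
    # the two middle ones.
    print(median_of_two_arrays([1, 3], [2]))  # expected: 2
    print(median_of_two_arrays([1, 2], [3, 4]))  # expected: 2.5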
| 471
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
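
    # Example invocation (an addition for illustration; every path below is hypothetical):
    #
    #   python convert_lora_safetensor_to_diffusers.py \
    #       --base_model_path runwayml/stable-diffusion-v1-5 \
    #       --checkpoint_path ./lora_weights.safetensors \
    #       --dump_path ./converted_pipeline \
    #       --alpha 0.75
    #
    # The merge performed by convert() is W = W0 + alpha * (lora_up @ lora_down) per
    # visited layer, so --alpha scales how strongly the LoRA delta is baked in.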
| 471
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 157
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 157
| 1
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # max_weight = -15
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # weight = [2, -4, 6, 8, 10, 12]
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # profit = [10, -20, 30, 40, 50, 60]
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = null
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # profit length != weight length
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 700
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 514
| 0
|
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Returns the first triangle number with over 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
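
    # Illustrative sanity check (an addition, not in the original file): 28 = 2^2 * 7,
    # so count_divisors returns (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
    print(count_divisors(28))  # expected: 6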
| 74
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 283
| 0
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 209
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
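
if __name__ == "__main__":
    # Minimal usage sketch of the decorator exercised above (an addition for
    # illustration). find_executable_batch_size retries the wrapped function,
    # halving batch_size whenever it raises an out-of-memory style error.
    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        # a real loop would build dataloaders and models from batch_size here
        if batch_size > 16:  # pretend anything above 16 does not fit in memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    print(train())  # expected: 16 (tries 64 -> 32 -> 16)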
| 209
| 1
|
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"{solution() = }")
| 227
|
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of a sentence or word."""
    if not sentence:
        return ""

    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
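
    # Illustrative examples (an addition, not in the original file): only the first
    # character is mapped through the lowercase -> uppercase table; everything else,
    # including a non-alphabetic first character, is left untouched.
    print(capitalize("hello world"))  # expected: Hello world
    print(capitalize("123 hello"))  # expected: 123 hello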
| 227
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
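
    # Worked example (an addition, not in the original file): query(1, 1, size, 4, 6)
    # returns the maximum of elements 4..6 (1-indexed), i.e. max(7, 3, -5) = 7, and
    # after update(1, 1, size, 1, 3, 111) every element in positions 1..3 becomes 111,
    # so the full-range query(1, 1, size, 1, 15) returns 111.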
| 710
|
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
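
    # Worked example (an addition, not in the original file): identical strings match
    # completely (match_count equals the length, with no transpositions) and share a
    # 4-character prefix, so the Jaro-Winkler score is exactly 1.0.
    print(jaro_winkler("hello", "hello"))  # expected: 1.0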
| 412
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
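
if __name__ == "__main__":
    # Usage sketch (an addition for illustration; the repo id and file name below are
    # hypothetical). hf_hub_url only builds a URL string, so no network is needed.
    print(hf_hub_url("user/my-dataset", "data/train.csv", revision="main"))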
| 56
|
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
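
    # Illustrative conversion (an addition, not in the original file): 4 cubic metres
    # in litres, using the factor table above (1 cubic metre = 1000 litres).
    print(volume_conversion(4, "cubicmeter", "litre"))  # expected: 4000.0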
| 566
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
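
if __name__ == "__main__":
    # Usage sketch (an addition for illustration): loading the pretrained vocabulary
    # requires network access and the sentencepiece package.
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    print(tokenizer("Bonjour le monde").input_ids)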
| 87
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    # Quick end-to-end check that a converted checkpoint generates sensible text.
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
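
# Illustrative sketch (not part of the conversion script): reading back the index written by
# `shard_on_the_fly`. The index file name follows the standard transformers sharded-checkpoint
# layout and is an assumption here.
import json
import os


def read_weight_map(dump_path, index_name="pytorch_model.bin.index.json"):
    with open(os.path.join(dump_path, index_name), encoding="utf-8") as f:
        index = json.load(f)
    # maps each parameter name to the shard file that stores it
    return index["weight_map"]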
| 87
| 1
|
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    # Liouville function: -1 if `number` has an odd count of prime factors
    # (counted with multiplicity), +1 otherwise.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
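
# Illustrative check of the values implied by the definition above (assumes prime_factors
# counts multiplicity): 12 = 2*2*3 has three prime factors, 36 = 2*2*3*3 has four.
def _demo():
    assert liouville_lambda(12) == -1
    assert liouville_lambda(36) == 1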
| 55
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
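
# The test above compares images by max absolute difference; the same check as a small
# reusable helper (illustrative sketch, not part of the test suite):
def images_close(a, b, tol=1e-2):
    return bool(np.abs(a.astype(np.float64) - b.astype(np.float64)).max() < tol)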
| 620
| 0
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
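
# A toy sketch of the greedy longest-match-first idea behind the WordPiece outputs asserted
# above (illustrative; not the transformers implementation):
def toy_wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]
        start = end
    return pieces


# toy_wordpiece("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]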
| 707
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
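
# Illustrative sketch (not part of the script): checking how small the saved artifacts are.
import os


def dir_size_mb(path):
    total = sum(
        os.path.getsize(os.path.join(root, name))
        for root, _, names in os.walk(path)
        for name in names
    )
    return total / 1e6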
| 364
| 0
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n                This includes models such as gpt2, causal variations of bert,\n                causal versions of t5, and more (the full list can be found\n                in the AutoModelForCausalLM documentation here:\n                https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, model_id, input_texts, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be one of 'gpu', 'cpu' or 'cuda'."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is in nats, so plain exp (not exp2) gives perplexity
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 114
| 0
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
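
# Minimal sketch (hypothetical helper, not part of the model above) of the early-exit rule the
# highway branches enable: leave at the first layer whose prediction entropy is low enough.
import torch


def should_exit(logits, threshold):
    probs = torch.softmax(logits, dim=-1)
    ent = -(probs * torch.log(probs.clamp_min(1e-12))).sum(dim=-1).mean()
    return bool(ent < threshold)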
| 714
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of flatten layer
        :param bp_num2: units number of hidden layer
        :param bp_num3: units number of output layer
        :param rate_w: rate of weight learning
        :param rate_t: rate of threshold learning
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model back into a CNN instance
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and save as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three-dimension data to a one-dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """
        calculate the gradient from the data slice of the pool layer
        pd_pool: list of matrix
        out_map: the shape of data slice (size_map * size_map)
        return: pd_all: list of matrix, [num, size_map, size_map]
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of the image after the convolution process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
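
# Tiny illustration of the average pooling performed by CNN.pooling, done with plain
# numpy reshaping on a 4x4 map and 2x2 windows (illustrative only):
def _pooling_sketch():
    fmap = np.arange(16, dtype=float).reshape(4, 4)
    return fmap.reshape(2, 2, 2, 2).mean(axis=(1, 3))  # 2x2 average-pooled map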
| 355
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
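
# The token-id fallback wiring above, isolated as a small pure function (illustrative only):
def resolve_decoder_tokens(decoder_start_token_id, pad_token_id, bos_token_id, eos_token_id):
    if decoder_start_token_id is None:
        decoder_start_token_id = bos_token_id
    if pad_token_id is None:
        pad_token_id = eos_token_id
    return decoder_start_token_id, pad_token_id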
| 609
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If a PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
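
# Minimal sketch of the label-smoothed NLL that `label_smoothed_nll_loss` computes (the exact
# helper lives in the accompanying utils module; this equivalent form is an assumption):
# mix the one-hot NLL with a uniform penalty over the vocabulary.
def smoothed_nll(lprobs, target, epsilon):
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)
    return ((1.0 - epsilon) * nll + epsilon * smooth).mean()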
| 609
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__lowerCAmelCase = """src/diffusers"""
# Matches is_xxx_available()
__lowerCAmelCase = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
__lowerCAmelCase = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
__lowerCAmelCase = """
{0} = None
"""
__lowerCAmelCase = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
__lowerCAmelCase = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    # Find one (or multiple) backend in a code line of the init.
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    # Read the init and extract backend-specific objects.
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    # Create the code for the dummy object corresponding to `name`.
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    # Create the content of the dummy files.
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    # Check if the dummy files are up to date and maybe `overwrite` with the right content.
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_dummies(args.fix_and_overwrite)
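
# Usage sketch (illustrative object name): rendering a single dummy class for the torch backend.
# The result is the DUMMY_CLASS template filled in, i.e. a class whose every entry point calls
# requires_backends(..., ["torch"]).
def _dummy_object_sketch():
    return create_dummy_object("UNet2DModel", '["torch"]')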
| 589
|
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 589
| 1
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = inspect.getfile(accelerate.test_utils )
UpperCAmelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase_ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = F"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
UpperCAmelCase_ = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
| 82
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    # Lower text and remove punctuation, articles and extra whitespace.
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    # `plt` is imported lazily in the full script's main() when plotting is requested
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = [1.0]
UpperCAmelCase_ = [0.0]
UpperCAmelCase_ = 0.0
for i, qid in enumerate(lowerCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ = true_pos / float(i + 1 )
UpperCAmelCase_ = true_pos / float(lowerCAmelCase__ )
if i == len(lowerCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase__ )
recalls.append(lowerCAmelCase__ )
if out_image:
plot_pr_curve(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return {"ap": 100.0 * avg_prec}

def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
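
# How the sweep above works (explanatory comment): questions are sorted by no-answer
# probability and the abstention threshold is moved past one question at a time;
# `cur_score` gains a question's exact/F1 score when an answerable question is kept,
# and loses 1 when a spurious answer to an unanswerable question is kept.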

def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
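
# Example invocation (hedged: the flag names assume the argument parser defined
# earlier in this script, and all file names are placeholders):
#   python evaluate_squad.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-image-dir eval_plots --out-file eval.json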
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
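
# To run only this test class (illustrative; assumes pytest is installed and the
# file path is a placeholder):
#   pytest test_models_vq.py -k VQModelTests
# Note that the `fusing/vqgan-dummy` checkpoint is downloaded from the Hugging Face
# Hub on first use.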
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
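
# How the lazy pattern above behaves (explanatory comment, not original code):
# `_LazyModule` replaces this module in `sys.modules`, so the submodules listed in
# `_import_structure` are only imported on first attribute access, e.g.
#     from transformers.models.dpt import DPTForDepthEstimation
# pulls in `modeling_dpt` (and its torch dependency) only at that point.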
from math import pi, sqrt
def gamma(num: float) -> float:
    """Evaluate the gamma function for positive integers and half-integers,
    using the recurrence gamma(n) = (n - 1) * gamma(n - 1) and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
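
# Minimal usage sketch (hedged: `model` stands for a trained DPRReader producing
# start/end/relevance logits as in `DPRReaderOutput`; it is not defined in this file):
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)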
"""simple docstring"""
class UpperCAmelCase :
def __init__( self : Optional[int] ):
"""simple docstring"""
_snake_case = ''''''
_snake_case = ''''''
_snake_case = []
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_snake_case = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_snake_case = self.__min_dist_top_down_dp(__lowerCamelCase , n - 1 )
_snake_case = self.__min_dist_top_down_dp(m - 1 , __lowerCamelCase )
_snake_case = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_snake_case = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return self.dp[m][n]
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = worda
_snake_case = worda
_snake_case = [[-1 for _ in range(len(__lowerCamelCase ) )] for _ in range(len(__lowerCamelCase ) )]
return self.__min_dist_top_down_dp(len(__lowerCamelCase ) - 1 , len(__lowerCamelCase ) - 1 )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = worda
_snake_case = worda
_snake_case = len(__lowerCamelCase )
_snake_case = len(__lowerCamelCase )
_snake_case = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_snake_case = j
elif j == 0: # second string is empty
_snake_case = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_snake_case = self.dp[i - 1][j - 1]
else:
_snake_case = self.dp[i][j - 1]
_snake_case = self.dp[i - 1][j]
_snake_case = self.dp[i - 1][j - 1]
_snake_case = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return self.dp[m][n]
if __name__ == "__main__":
snake_case = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
snake_case = input('''Enter the first string: ''').strip()
snake_case = input('''Enter the second string: ''').strip()
print()
print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}")
print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}")
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
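
# Worked example (illustrative): turning "horse" into "ros" takes 3 edits
# (replace 'h' -> 'r', delete 'r', delete 'e'), and both strategies agree:
#     EditDistance().min_dist_top_down("horse", "ros")  == 3
#     EditDistance().min_dist_bottom_up("horse", "ros") == 3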
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
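
# Minimal usage sketch (hedged: the field values below are illustrative, not
# recommended settings):
#     config = TimeSeriesTransformerConfig(
#         prediction_length=24, context_length=48, num_time_features=2, lags_sequence=[1, 2, 3]
#     )
#     config.feature_size  # == input_size * len(lags_sequence) + config._number_of_features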
| 404
| 0
|
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__lowerCAmelCase = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
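
# After this script runs, the tiny checkpoint can be reloaded for smoke tests
# (illustrative; the directory name comes from `mname_tiny` above):
#     model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-ru")
#     tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-ru")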
"""Tests for the TensorFlow OPT model."""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel

def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)

@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_tokens_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)

def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))

@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
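
# Note: the @slow integration tests above only run when the RUN_SLOW=1 environment
# variable is set (standard transformers test convention), and they download the
# facebook/opt-* checkpoints from the Hugging Face Hub on first execution.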
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                text = F' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
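                # Reading aid (not part of the original test): `trim_offsets` controls whether the
                # leading "Ġ"-space is excluded from a token's offset span, while `add_prefix_space`
                # controls whether a space is prepended before tokenizing at all - hence the (0 vs 1)
                # span starts and the (len vs len + 1) boundaries asserted above.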
| 704
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset')) , 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
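# Illustrative mapping (based only on the branches above): a URL such as
# ".../dpt_large-midas-2f21e586.pt" matches the "large" branch (ViT-L: 24 layers,
# hidden size 1_024), and any URL containing "ade" additionally swaps in the
# 150-class ADE20K segmentation head with a [1, 150, 480, 480] output shape.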
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head')
    if "scratch" in name:
        name = name.replace('scratch' , 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt')
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm')
    if "head" in name:
        name = name.replace('head' , 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head')
    return name
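# Illustrative trace (hypothetical key): "scratch.refinenet4.resConfUnit1.conv1.weight"
# -> "neck.refinenet4..." (scratch -> neck) -> "neck.fusion_stage.layers.0..." (the
# refinenet index is flipped via abs(layer_idx - 4)) -> ".residual_layer1.convolution1.weight".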
def read_in_q_k_v(state_dict , config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
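# Note: the fused timm `qkv` projection has shape (3 * hidden_size, hidden_size); the
# three row blocks [0:h), [h:2h) and [2h:3h) above become the query, key and value weights.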
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self ):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    def get_config(self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ):
'''simple docstring'''
return
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason='UperNet does not have tied weights' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k' ,repo_type='dataset' ,filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath ).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
| 28
|
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph , s , t , parent ) -> bool:
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph , source , sink ) -> list:
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
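# Worked example (hand-traced Edmonds-Karp on `test_graph` defined above): the
# augmenting paths 0-1-3-5 (12), 0-2-4-5 (4) and 0-2-4-3-5 (7) give a maximum flow of
# 23, so `mincut(test_graph, source=0, sink=5)` returns [(1, 3), (4, 3), (4, 5)].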
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 524
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict ) -> Dict:
    """simple docstring"""
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict , codebook_state_dict ) -> Optional[int]:
    """simple docstring"""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
        key = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
        key = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
        key = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
        key = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
        key = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
        key = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
        key = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
        key = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
        key = key.replace('image_encoder.module' , 'flava.image_model' )
        key = key.replace('text_encoder.module' , 'flava.text_model' )
        key = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
        key = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
        key = key.replace('text_projection' , 'flava.text_projection' )
        key = key.replace('image_projection' , 'flava.image_projection' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'image_codebook.{key}'] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ) -> Union[str, Any]:
    """simple docstring"""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='cpu' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='cpu' )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_lowerCamelCase : Any = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 705
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
    @register_to_config
    def __init__(self , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1E-3 ):
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps(self , num_inference_steps , device: Union[str, torch.device] = None ):
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred(self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__(self ):
        """simple docstring"""
        return self.config.num_train_timesteps
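# Note: `step_pred` above is one Euler-Maruyama step of the reverse-time VP-SDE
# dx = [-1/2 beta(t) x - beta(t) * score] dt + sqrt(beta(t)) dw, taken with a negative
# step dt = -1 / len(timesteps), which is why the added noise is scaled by sqrt(-dt).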
| 324
| 0
|
def solution(n: int = 1000 ) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
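# Equivalent digit count (a sketch, same semantics): the counting loop could simply be
# `if len(str(f)) == n: break`. For example, the first 3-digit Fibonacci number is
# F(12) = 144, so solution(3) == 12.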
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 413
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def get_resize_output_image_size(input_image , output_size , keep_aspect_ratio , multiple ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
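# Worked example (illustrative): a 480x640 input resized toward 384x384 with
# keep_aspect_ratio=True and multiple=32 keeps the scale with the smaller deviation
# from 1 (fit height, scale 0.8), yielding constraint_to_multiple_of(384) = 384 and
# constraint_to_multiple_of(512) = 512, i.e. an output size of (384, 512).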
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self : List[Any], __lowercase : np.ndarray, __lowercase : Dict[str, int], __lowercase : bool = False, __lowercase : int = 1, __lowercase : PILImageResampling = PILImageResampling.BICUBIC, __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Union[str, Any], ):
lowercase__ = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase__ = get_resize_output_image_size(
__lowercase, output_size=(size["height"], size["width"]), keep_aspect_ratio=__lowercase, multiple=__lowercase, )
return resize(__lowercase, size=__lowercase, resample=__lowercase, data_format=__lowercase, **__lowercase )
def A__ ( self : str, __lowercase : np.ndarray, __lowercase : Union[int, float], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : List[Any], ):
return rescale(__lowercase, scale=__lowercase, data_format=__lowercase, **__lowercase )
def A__ ( self : Any, __lowercase : np.ndarray, __lowercase : Union[float, List[float]], __lowercase : Union[float, List[float]], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Optional[Any], ):
return normalize(__lowercase, mean=__lowercase, std=__lowercase, data_format=__lowercase, **__lowercase )
def A__ ( self : List[str], __lowercase : ImageInput, __lowercase : bool = None, __lowercase : int = None, __lowercase : bool = None, __lowercase : int = None, __lowercase : PILImageResampling = None, __lowercase : bool = None, __lowercase : float = None, __lowercase : bool = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : ChannelDimension = ChannelDimension.FIRST, **__lowercase : Tuple, ):
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(__lowercase )
lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=__lowercase, size=__lowercase, resample=__lowercase ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=__lowercase, scale=__lowercase ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=__lowercase, mean=__lowercase, std=__lowercase ) for image in images]
lowercase__ = [to_channel_dimension_format(__lowercase, __lowercase ) for image in images]
lowercase__ = {"pixel_values": images}
return BatchFeature(data=__lowercase, tensor_type=__lowercase )
def A__ ( self : int, __lowercase : Optional[Any], __lowercase : List[Tuple] = None ):
lowercase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__lowercase ):
lowercase__ = target_sizes.numpy()
lowercase__ = []
for idx in range(len(__lowercase ) ):
lowercase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=__lowercase )
lowercase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowercase )
else:
lowercase__ = logits.argmax(dim=1 )
lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 413
| 1
|
"""simple docstring"""
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id(spanish_id: str ) -> bool:
    '''simple docstring'''
    if not isinstance(spanish_id , str ):
        msg = f"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace("-" , "" ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
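# Example: for "12345678Z" (dashes as in "12345678-Z" are stripped first),
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the function returns True.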
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ) -> str:
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        examples = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
    def run_pipeline_test(self , vqa_pipeline , examples ) -> Any:
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{"score": ANY(float ), "answer": ANY(str )}],
                [{"score": ANY(float ), "answer": ANY(str )}],
            ] , )
@require_torch
    def test_small_model_pt(self ) -> int:
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            outputs , [{"score": ANY(float ), "answer": ANY(str )}, {"score": ANY(float ), "answer": ANY(str )}] )
@slow
@require_torch
    def test_large_model_pt(self ) -> Any:
        vqa_pipeline = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
pass
| 553
| 0
|
'''simple docstring'''
def permute(nums: list[int] ) -> list[list[int]]:
    """simple docstring"""
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2(nums ) -> list:
    """simple docstring"""
    def backtrack(start: int ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1 )
                nums[start], nums[i] = nums[i], nums[start]  # backtrack
    output = []
    backtrack(0 )
    return output
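# Both functions enumerate all n! orderings, just in different orders; e.g.
# permute2([1, 2, 3]) yields [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]].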
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
res = permute2([1, 2, 3])
print(res)
doctest.testmod()
| 208
|
'''simple docstring'''
def odd_even_sort(input_list: list ) -> list:
    """simple docstring"""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
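# Example: odd_even_sort([3, 1, 4, 1, 5]) -> [1, 1, 3, 4, 5]. Each outer pass does one
# compare-and-swap sweep over the even index pairs and one over the odd index pairs
# (brick sort), terminating only once a full double sweep makes no swap.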
if __name__ == "__main__":
print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 208
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 1_60_00 ):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
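# e.g. with sample_rate=1_60_00 and max_length=20 this keeps at most 320_000 samples,
# picking a uniformly random window whenever the clip is longer.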
@dataclass
class DataTrainingArguments:
lowercase_ = field(default=UpperCamelCase_ , metadata={'help': 'Name of a dataset from the datasets package'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'A file containing the training audio paths and labels.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'A file containing the validation audio paths and labels.'} )
lowercase_ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowercase_ = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowercase_ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowercase_ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowercase_ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class ModelArguments:
lowercase_ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
lowercase_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
    def __post_init__(self ):
        """simple docstring"""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_audio_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets['train'] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets['eval'] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--label_column_name` to the correct text column - one of '
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
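
# A hypothetical invocation of this script (illustrative only; the actual flag
# names come from the argument parser defined earlier in the file, and the model
# and dataset choices below are assumptions, not part of the original source):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ks \
#       --do_train --do_eval \
#       --max_length_seconds 1 --learning_rate 3e-5 --num_train_epochs 5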
| 707
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : str = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : List[str] = type
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : List[str] = 4_2384
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
**a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a_ , a_ )
| 636
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
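
# A minimal usage sketch (an illustration, not part of the original file; it
# assumes the standard `transformers` import path for this config class):
#
#   from transformers import MarkupLMConfig
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   print(config.model_type)   # "markuplm"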
| 234
|
class Graph:
    def __init__(self):
        # mapping from each vertex to the list of vertices it points to
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
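
# For contrast, a hedged sketch of the same traversal written iteratively with
# an explicit stack (not part of the original class; the visiting order of
# neighbours may differ from the recursive version above):
def dfs_iterative(graph: Graph) -> None:
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            v = stack.pop()
            if visited[v]:
                continue
            visited[v] = True
            print(v, end=" ")
            # push unvisited neighbours of v
            stack.extend(j for j in graph.vertex.get(v, []) if not visited[j])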
| 234
| 1
|
'''simple docstring'''
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce console output from TF (the assignment target was garbled; TF_CPP_MIN_LOG_LEVEL is the assumed intent, which also explains the otherwise unused `os` import)
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 703
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that the created tokens are the same as the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        """Round-trip a sample of the XNLI dataset through the tokenizer."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
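
# A quick round-trip sketch outside the test harness (illustrative; requires
# network access to download "bigscience/tokenizer", the checkpoint used in
# setUp above):
#
#   tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#   ids = tok("The quick brown fox")["input_ids"]
#   assert tok.decode(ids) == "The quick brown fox"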
| 88
| 0
|
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    # subtract the row-wise max for numerical stability before exponentiating
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
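
# Hedged usage sketch (assumes the standard `pipeline` factory; the model is
# downloaded on first use, and the score shown is illustrative):
#
#   from transformers import pipeline
#   clf = pipeline("text-classification")
#   clf("This movie was great!")
#   # -> [{"label": "POSITIVE", "score": 0.99...}]
#   clf("This movie was great!", top_k=None)  # all labels, sorted by score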
| 24
|
'''simple docstring'''
def partition(m: int) -> int:
    """Count the integer partitions of m with a two-dimensional DP table."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
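
# Worked example: partition(5) == 7, counting
#   5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
# Here memo[n][k] accumulates the partitions of n whose parts are at most k + 1:
# the first term reuses partitions with smaller parts, the second places one
# part of size k + 1.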
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 24
| 1
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def _lowerCamelCase (__lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> List[str]:
# Initialize accelerator
a__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ = config["""lr"""]
a__ = int(config["num_epochs"] )
a__ = int(config["seed"] )
a__ = int(config["batch_size"] )
a__ = args.model_name_or_path
set_seed(__lowerCamelCase )
a__ = get_dataloaders(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ = AutoModelForSequenceClassification.from_pretrained(__lowerCamelCase , return_dict=__lowerCamelCase )
# Instantiate optimizer
a__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a__ = optimizer_cls(params=model.parameters() , lr=__lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
a__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
a__ = 1
a__ = (len(__lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a__ = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=0 , num_training_steps=__lowerCamelCase , )
else:
a__ = DummyScheduler(__lowerCamelCase , total_num_steps=__lowerCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
a__ = 0
# We also need to keep track of the stating epoch so files are named properly
a__ = 0
# Now we train the model
a__ = evaluate.load("glue" , "mrpc" )
a__ = 0
a__ = {}
for epoch in range(__lowerCamelCase , __lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
a__ = model(**__lowerCamelCase )
a__ = outputs.loss
a__ = loss / gradient_accumulation_steps
accelerator.backward(__lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
a__ = 0
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a__ = model(**__lowerCamelCase )
a__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
a__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__lowerCamelCase ) - 1:
a__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
a__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __lowerCamelCase )
a__ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
a__ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase () -> List[str]:
a__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCamelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCamelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCamelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=__lowerCamelCase , default=__lowerCamelCase , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=__lowerCamelCase , default=3 , help="Number of train epochs." , )
a__ = parser.parse_args()
a__ = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
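
# Hypothetical launch command (an assumption for illustration; the DeepSpeed
# config file name, script name and flag values below are placeholders):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results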
| 714
|
'''simple docstring'''
def is_contains_unique_chars(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
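
# Worked example: for "aba" the loop turns on bit 97 ("a") and then bit 98 ("b");
# the second "a" finds bit 97 already set (bitmap >> 97 & 1 == 1) and returns False.
# "abc" never revisits a bit, so the function returns True.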
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # Builds the sample tree used throughout the demo below:
    #        1
    #       / \
    #      2   3
    #     / \
    #    4   5
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # alternate between left-to-right and right-to-left per level
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
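
# Expected output for the sample tree built in make_tree (derived by hand):
#   In-order:   [4, 2, 5, 1, 3]
#   Pre-order:  [1, 2, 4, 5, 3]
#   Post-order: [4, 5, 2, 3, 1]
#   Height: 3; level order: [1, 2, 3, 4, 5]; zigzag: [[1], [3, 2], [4, 5]]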
| 459
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
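
# With the sample input above this prints:
#   Sorted order is: 2 3 4 6 7 8 8
# Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it
# only pays off when the value range is comparable to the number of elements.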
| 459
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url: str):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)

def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
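
# Illustrative self-check (added for this writeup, not part of the original script):
# two representative timm-style keys and the HF-style names rename_key produces.
def _rename_key_examples():
    assert rename_key("pretrained.model.blocks.0.attn.proj.weight") == "dpt.encoder.layer.0.attention.output.dense.weight"
    assert rename_key("scratch.refinenet4.out_conv.weight") == "neck.fusion_stage.layers.0.projection.weight"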

def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
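
# The slicing above splits a fused qkv projection of shape (3 * hidden_size, hidden_size)
# into three (hidden_size, hidden_size) blocks, in query/key/value order. A standalone
# sketch with a made-up size (illustrative, not part of the conversion script):
def _qkv_split_demo(hidden_size=4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value]), fused)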

def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
__lowercase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput

def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
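
# Minimal sanity check (illustrative, not part of the scheduler module): the cosine
# schedule should yield one beta per timestep, each in (0, max_beta].
def _betas_for_alpha_bar_demo():
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert 0.0 < float(betas.min()) and float(betas.max()) <= 0.999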

class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
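
    # How sigma_to_t works, on made-up numbers (illustrative note, not part of the
    # scheduler): with log_sigmas = [2.0, 1.0, 0.0] and a query log-sigma of 1.5,
    # low_idx = 0 and high_idx = 1, so w = (2.0 - 1.5) / (2.0 - 1.0) = 0.5 and the
    # fractional timestep is (1 - 0.5) * 0 + 0.5 * 1 = 0.5 -- halfway between entries.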
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
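
    # A minimal denoising-loop sketch (illustrative; `unet` stands in for any
    # noise-prediction model and is not defined in this module):
    #
    #     scheduler = KDPM2DiscreteScheduler()
    #     scheduler.set_timesteps(num_inference_steps=20, device="cpu")
    #     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_input = scheduler.scale_model_input(sample, t)
    #         noise_pred = unet(model_input, t)  # hypothetical model call
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample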
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a matrix into its top-left, top-right, bottom-left and bottom-right quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))

def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
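
# Illustrative self-check (not part of the original module): Strassen's seven
# products must reproduce the naive result; verified here on a random 4x4 pair.
def _strassen_self_check():
    import random

    a = [[random.randint(0, 9) for _ in range(4)] for _ in range(4)]
    b = [[random.randint(0, 9) for _ in range(4)] for _ in range(4)]
    expected = [[sum(a[i][k] * b[k][j] for k in range(4)) for j in range(4)] for i in range(4)]
    assert actual_strassen(a, b) == expected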

def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
_lowercase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_lowercase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}

class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
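
# Example usage (illustrative, not part of the module): like any PretrainedConfig
# subclass, fields can be overridden at construction, and attribute_map aliases work:
#
#     config = TrajectoryTransformerConfig(n_layer=6, n_head=8)
#     assert config.hidden_size == config.n_embd  # "hidden_size" is mapped to "n_embd"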
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
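
# Quick usage sketch (supplementary to test_trie() below):
#
#     root = TrieNode()
#     root.insert_many(["cat", "car"])
#     assert root.find("car") and not root.find("ca")
#     root.delete("car")
#     assert root.find("cat") and not root.find("car")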

def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
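
# Usage sketch (illustrative; run() blocks on keyboard input, so it is shown as a
# comment rather than as executable module-level code):
#
#     menu = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"])
#     chosen_index = menu.run(default_choice=0)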
from math import sqrt

def solution(limit: int = 1_000_000) -> int:
    """
    Return the least value of M (the longest side of the cuboid) such that the number
    of cuboids whose shortest wall-to-wall route has integer length first exceeds
    `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
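
# Why the increment above counts cuboids: for longest side M = max_cuboid_size and
# a + b = sum_shortest_sides = s, the shortest surface route has length
# sqrt(s**2 + M**2), so every split of s with 1 <= a <= b <= M yields one cuboid.
# Valid values of a run from max(1, s - M) up to min(M, s // 2), giving
# min(M, s // 2) - max(1, s - M) + 1 of them. E.g. M = 2, s = 3 admits only
# (a, b) = (1, 2): min(2, 1) - max(1, 1) + 1 = 1.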
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ):
lowercase__ : Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
lowercase__ : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase__ : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
lowercase__ : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase__ : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
lowercase__ : Optional[int] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase__ : List[str] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
lowercase__ : List[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
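
# The kernels above are stored per layer and per head, e.g. (d_model, num_layers,
# num_heads, d_head); after selecting layer i, the reshape folds the head axes back
# into one projection matrix. Standalone illustration with made-up sizes (not part of
# the converter):
def _fold_heads_demo():
    d_model, num_heads, d_head = 8, 2, 4
    kernel = np.zeros((d_model, num_heads, d_head))
    folded = kernel.reshape(kernel.shape[0], kernel.shape[1] * kernel.shape[2])
    assert folded.shape == (d_model, num_heads * d_head)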

def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo

def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]

def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
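# Example invocation, a sketch only -- the checkpoint and config paths below are hypothetical:
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention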
| 152
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
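# Usage note (sketch): instantiating the deprecated class still works but emits a
# FutureWarning; new code should construct the processor directly, e.g.
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")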
| 81
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class EfficientFormerImageProcessor(BaseImageProcessor):
    # NOTE: the class name was lost in extraction; it is restored here based on the defaults
    # (IMAGENET_DEFAULT stats, 224x224 resize plus center crop), which match that processor.
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Optional[Dict[str, int]] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
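# Minimal usage sketch (assumes PIL is installed; the image file path is hypothetical):
#   from PIL import Image
#   image = Image.open("sample.jpg")
#   processor = EfficientFormerImageProcessor()
#   inputs = processor.preprocess(image, return_tensors="np")
#   print(inputs["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults above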
| 714
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 106
| 0
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
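# Example sketch: reading CoNLL-style NER examples (the data directory is hypothetical):
#   task = NER()
#   examples = task.read_examples_from_file("/path/to/conll2003", Split.train)
#   print(examples[0].words[:5], examples[0].labels[:5])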
| 38
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 196
| 0
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 30
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        message = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(message)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        message = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(message)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
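# Round-trip sketch; the standard library serves as a cross-check for both directions:
#   import base64
#   payload = b"Hello, World!"
#   assert base64_encode(payload) == base64.b64encode(payload)
#   assert base64_decode(base64_encode(payload).decode()) == payload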
| 30
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
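# Construction sketch: the defaults above reproduce the base LXMERT architecture, so a
# custom config only needs to override what differs, e.g.:
#   config = LxmertConfig(num_qa_labels=2, l_layers=6)
#   print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 6}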
| 80
| 1
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 706
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    """Recursive function for calculating the height of the binary tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Returns node values of the whole binary tree in level-order."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns node values of a particular level, left-to-right."""
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)
    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns node values of a particular level, right-to-left."""
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)
    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternates left-to-right and right-to-left per level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
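# For the sample tree built by make_tree() (1 at the root, 2/3 below it, 4/5 under 2),
# the traversals above print:
#   in-order  [4, 2, 5, 1, 3], pre-order [1, 2, 4, 5, 3], post-order [4, 5, 2, 3, 1]
#   zigzag    [[1], [3, 2], [4, 5]]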
| 108
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
MAX_MODEL_INPUT_SIZES = {
"""facebook/s2t-small-librispeech-asr""": 1_024,
}
lowerCamelCase_ : Any = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
lowerCamelCase_ : Optional[Any] = {"""mustc""": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<unk>" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , do_upper_case=UpperCAmelCase , do_lower_case=UpperCAmelCase , tgt_lang=UpperCAmelCase , lang_codes=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
__a = do_upper_case
__a = do_lower_case
__a = load_json(UpperCAmelCase )
__a = {v: k for k, v in self.encoder.items()}
__a = spm_file
__a = load_spm(UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__a = lang_codes
__a = LANGUAGES[lang_codes]
__a = [f'''<lang:{lang}>''' for lang in self.langs]
__a = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
__a = self.lang_tokens
__a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__a = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target language code."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token) -> int:
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
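# Loading sketch, using the checkpoint referenced in the URL map above (requires network access):
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   print(tokenizer.decode(ids))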
| 559
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
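# Example invocation, a sketch only -- all file paths below are hypothetical:
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny --to_safetensors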
| 559
| 1
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
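# Usage sketch (the model id is the public FLAVA checkpoint; requires network access,
# and `image` stands for any PIL image you have loaded):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")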
| 707
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
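# Added sketch (not from the original file) of what the `_LazyModule` indirection
# above buys: importing the package stays cheap, and the heavy submodule is only
# loaded when an attribute is first touched.
def _lazy_import_example():
    import transformers
    config = transformers.SqueezeBertConfig()  # modeling code loads here, lazily
    return config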
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model(model, input, target, accelerator, do_backward=True):
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ', f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**', )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ', '`split_batches=False`, `dispatch_batches=False`**', )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ', f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**', )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
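# Added, hedged standalone sketch (not from the original file) of the pattern the
# tests above exercise: `accelerator.accumulate(model)` suppresses gradient sync
# on off-steps so updates land every `gradient_accumulation_steps` batches.
def _accumulate_demo():
    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from accelerate import Accelerator
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = torch.nn.Linear(1, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    loader = DataLoader(TensorDataset(torch.randn(8, 1), torch.randn(8, 1)), batch_size=2)
    model, opt, loader = accelerator.prepare(model, opt, loader)
    for x, y in loader:
        with accelerator.accumulate(model):  # sync only on accumulation boundaries
            loss = torch.nn.functional.mse_loss(model(x), y)
            accelerator.backward(loss)
            opt.step()
            opt.zero_grad()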
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs, ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
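# Added, hedged usage sketch (not from the original file) for the pipeline class
# above via the high-level factory; the OWL-ViT checkpoint id and labels are
# illustrative assumptions.
def _zero_shot_detection_example(image_path: str):
    from transformers import pipeline
    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    # returns dicts shaped by postprocess() above:
    # {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    return detector(image_path, candidate_labels=["car", "bicycle", "person"])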
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        # `is_decoder=False` is an assumption here; the obfuscated source did not preserve the value
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(__a)
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = True
if model_class in get_values(__a):
continue
_lowerCAmelCase : Any = model_class(__a)
model.to(__a)
model.train()
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(__a, __a, return_labels=__a)
_lowerCAmelCase : int = model(**__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = False
_lowerCAmelCase : Any = True
if model_class in get_values(__a) or not model_class.supports_gradient_checkpointing:
continue
_lowerCAmelCase : Optional[Any] = model_class(__a)
model.to(__a)
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase : List[str] = self._prepare_for_class(__a, __a, return_labels=__a)
_lowerCAmelCase : List[Any] = model(**__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = _config_zero_init(__a)
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(config=__a)
# Skip the check for the backbone
_lowerCAmelCase : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCAmelCase : Dict = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCAmelCase : List[Any] = DPTModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = "add"
with self.assertRaises(__a):
_lowerCAmelCase : Dict = DPTForDepthEstimation(__a)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
_lowerCAmelCase : Optional[int] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(__a)
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**__a)
_lowerCAmelCase : Optional[Any] = outputs.predicted_depth
# verify the predicted depth
_lowerCAmelCase : List[str] = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape, __a)
_lowerCAmelCase : Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(__a)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, __a, atol=1E-4))
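# Added, hedged inference sketch (not from the original file) mirroring the
# integration test above, outside unittest; the checkpoint id comes from that test.
def _estimate_depth(image_path: str):
    import torch
    from PIL import Image
    from transformers import DPTForDepthEstimation, DPTImageProcessor
    processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        return model(**inputs).predicted_depth  # (1, H, W) relative depth map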
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f'''{key}\n{value}\n''')
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
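# Added, hedged usage sketch (not from the original file): the processor under
# test, used directly. The MobileViT checkpoint id and path are illustrative assumptions.
def _mobilevit_preprocess_example(image_path: str):
    from PIL import Image
    from transformers import MobileViTImageProcessor
    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
    pixel_values = processor(images=Image.open(image_path), return_tensors="pt").pixel_values
    return pixel_values.shape  # (1, 3, crop_height, crop_width), channels flipped to BGR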
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    """simple docstring"""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
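# Added worked check (not from the original file): the smallest number evenly
# divisible by every integer in 1..10 is 2520, which the lcm folding reproduces.
assert solution(10) == 2520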
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
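# Added, hedged usage sketch (not from the original file): build a 48 kHz
# low-pass biquad and push one sample through it. It assumes the accompanying
# IIRFilter class exposes a per-sample `process()`, as in the audio_filters module.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    print(lowpass.process(0.5))  # filtered output for a single input sample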
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
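# Added worked check (not from the original file): the 6k +/- 1 trial division
# above agrees with a couple of known cases before solution() walks the spiral.
assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13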
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
    def dummy_unet_condition(self):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet(self):
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        unet = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
    def test_audio_diffusion(self):
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
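# Added, hedged generation sketch (not from the original file), based on the
# integration test above; the checkpoint id comes from that test.
def _audio_diffusion_example(seed: int = 42):
    import torch
    from diffusers import DiffusionPipeline
    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
    generator = torch.Generator().manual_seed(seed)
    output = pipe(generator=generator)
    return output.images[0], output.audios[0]  # PIL spectrogram, waveform array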
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def load_original_entity_vocab(entity_vocab_path):
    """Load the original JSON-lines entity vocabulary into a flat `{name: id}` mapping."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"{language}:{entity_name}"
            new_mapping[new_entity_name] = entity_id
    return new_mapping
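

def _entity_vocab_roundtrip_example(path="entity_vocab.jsonl"):
    # Illustrative smoke test, not part of the original conversion script: the file name,
    # this helper, and the sample row are assumptions showing the expected JSON-lines
    # format, where each row pairs one entity id with its (name, language) aliases.
    with open(path, "w", encoding="utf-8") as f:
        f.write(json.dumps({"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}) + "\n")
    mapping = load_original_entity_vocab(path)
    assert mapping["en:Japan"] == mapping["ja:日本"] == 3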
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
"""Project Euler problem 187: count the semiprimes (numbers with exactly two, not
necessarily distinct, prime factors) below a given bound."""
from __future__ import annotations

from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes below max_number with a two-pointer sweep: for each prime p
    (left pointer), count the primes q >= p such that p * q < max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
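

def _brute_force_semiprimes(max_number: int) -> int:
    # Illustrative cross-check, not part of the original solution: count pairs of primes
    # p <= q with p * q < max_number directly, to validate the two-pointer sweep on small
    # bounds (e.g. solution(100) == _brute_force_semiprimes(100) == 34).
    primes = calculate_prime_numbers(max_number)
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i:]:
            if p * q >= max_number:
                break
            count += 1
    return count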
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
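

# Illustrative sketch, not part of the original file: the lazy-module pattern above keeps
# plain `import` cheap by resolving attributes on first access, so heavy backends (torch,
# tf) are only imported when actually used. A stripped-down version of the idea (class
# name and structure here are assumptions for exposition):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that actually defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The heavy submodule (and its dependencies) is imported only now.
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(submodule, attr)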
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
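

# For reference, an illustrative sketch (an assumption, not the diffusers implementation)
# of the kind of check `assert_mean_pixel_difference` performs: compare two images by mean
# absolute pixel error under some tolerance. The threshold value is hypothetical.
def _mean_pixel_difference_ok(image, expected_image, threshold=10.0):
    a = np.asarray(image, dtype=np.float32)
    b = np.asarray(expected_image, dtype=np.float32)
    return float(np.abs(a - b).mean()) < threshold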
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
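

# Illustrative usage sketch, not part of the original module: run the processor on a
# synthetic image and inspect the output shape (the default 224x224 center crop). The
# class name matches the definition above; the input image is random data.
if __name__ == "__main__":
    dummy_image = PIL.Image.fromarray((np.random.rand(256, 256, 3) * 255).astype("uint8"))
    processor = CLIPImageProcessor()
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)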
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references, one for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
    # Word/position embeddings and their LayerNorm are copied over directly; the student
    # keys follow DistilBERT's parameter naming.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    # Keep every other teacher layer (0, 2, 4, 7, 9, 11) as the 6 student layers.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
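
    # Illustrative follow-up (an assumption, not part of the original script): the saved
    # state dict is shaped for a 6-layer DistilBERT student, so it could seed one like this:
    # from transformers import DistilBertConfig, DistilBertForMaskedLM
    # student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    # student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)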
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
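
    # XLNet places its special tokens at the END of the sequence: id 4 is <sep> and id 3
    # is <cls>, so a single sequence is encoded as "tokens <sep> <cls>" and a pair as
    # "A <sep> B <sep> <cls>" (unlike BERT, which leads with [CLS]).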
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_SCREAMING_SNAKE_CASE : List[Any] = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE,  # the reference encoding defined above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search: nodes are expanded in order of heuristic cost alone."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. by heuristic cost.
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the walkable neighbours of the given node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back to the start and return the path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
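

# Note the contrast with A*: greedy best-first ranks open nodes by the heuristic alone
# (f = h), while A* ranks them by path cost plus heuristic (f = g + h). A minimal sketch
# of the A* ordering, reusing the Node class above (an illustrative addition):
def a_star_f_cost(node: Node) -> float:
    return node.g_cost + node.calculate_heuristic()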
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
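

# Illustrative usage sketch (not part of the original module; the checkpoint id and the
# custom classes named below are examples/assumptions):
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#
# A custom processor can be made discoverable by registering it against its config class:
#
#   AutoImageProcessor.register(MyCustomConfig, MyCustomImageProcessor)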
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature: stores either raw bytes or a path, and decodes to an array on access."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> dict:
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
lowerCAmelCase__ , lowerCAmelCase__ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
lowerCAmelCase__ = xsplitext(__UpperCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
lowerCAmelCase__ = token_per_repo_id or {}
lowerCAmelCase__ = path.split("::" )[-1]
try:
lowerCAmelCase__ = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )["repo_id"]
lowerCAmelCase__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCAmelCase__ = None
with xopen(__UpperCAmelCase , "rb" , use_auth_token=__UpperCAmelCase ) as f:
lowerCAmelCase__ , lowerCAmelCase__ = sf.read(__UpperCAmelCase )
else:
lowerCAmelCase__ , lowerCAmelCase__ = sf.read(__UpperCAmelCase )
lowerCAmelCase__ = array.T
if self.mono:
lowerCAmelCase__ = librosa.to_mono(__UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCAmelCase__ = librosa.resample(__UpperCAmelCase , orig_sr=__UpperCAmelCase , target_sr=self.sampling_rate )
lowerCAmelCase__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCAmelCase ( self )-> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def UpperCAmelCase ( self , __UpperCAmelCase )-> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
lowerCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
lowerCAmelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
lowerCAmelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
lowerCAmelCase__ = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
lowerCAmelCase__ = storage.field("bytes" )
else:
lowerCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
lowerCAmelCase__ = storage.field("path" )
else:
lowerCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
lowerCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def UpperCAmelCase ( self , __UpperCAmelCase )-> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase ):
with xopen(__UpperCAmelCase , "rb" ) as f:
lowerCAmelCase__ = f.read()
return bytes_
lowerCAmelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase__ = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
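# A short illustrative sketch (added; the dataset and column names are hypothetical):
# casting a column of file paths to this Audio feature makes decoding lazy and automatic.
#
#   from datasets import Dataset, Audio
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#   # ds[0]["audio"] -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}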
def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of the perimeters of all almost-equilateral Heronian
    triangles whose perimeter does not exceed max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
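# A quick sanity check (added for illustration; values worked out by hand from the
# recurrence above): the first almost-equilateral Heronian triangles are (5, 5, 6) and
# (17, 17, 16), with perimeters 16 and 50, so capping the perimeter at 100 gives 66.
#
#   assert solution(100) == 16 + 50 == 66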
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
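        expected_words = lowerCamelCase_  # capture the OCR word list: the mangled name is reused by the box list below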
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
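        expected_boxes = lowerCamelCase_  # capture the box list under its intended name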
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
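# A brief illustrative sketch (added; not part of the original file): every hyperparameter
# above is per-stage, so overriding one means passing a list with one entry per stage.
#
#   config = CvtConfig(depth=[1, 4, 16])   # deepen the last stage, keep everything else default
#   assert config.model_type == "cvt"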
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    """Merge a LoRA checkpoint in safetensors format into a base Stable Diffusion pipeline."""
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
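# Example invocation (added for illustration; paths and model id are placeholders, not
# from the original script):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path lora_weights.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75
#
# The merged pipeline can then be reloaded with StableDiffusionPipeline.from_pretrained.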
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a list of (possibly tuple-valued) sequences to a fixed length."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    """Check whether `char` is a punctuation character."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
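# A tiny illustrative check of padding_tensor (added, not from the original script):
# right-padding two spans of (start, end) tuples with the sentinel (-1, -1) up to length 3.
#
#   padded = padding_tensor([[(0, 2)], [(1, 4), (5, 6)]], (-1, -1), "right", 3)
#   # padded == [[[0, 2], [-1, -1], [-1, -1]], [[1, 4], [5, 6], [-1, -1]]]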
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
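# A brief illustrative sketch (added; not part of the original file): the attribute_map
# above lets callers read `max_position_embeddings` as an alias for `context_length`.
#
#   config = RwkvConfig(context_length=2048)
#   assert config.max_position_embeddings == 2048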
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    """Disable gradient computation for all parameters of a module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about the flaky MPS backend."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations.")
    return device


def show_image(img):
    """Display an image without axis ticks."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
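# Illustrative usage (added; `freeze_params` and `get_device` are reconstructed names for
# the mangled functions above, not guaranteed to match the original file):
#
#   device = get_device()
#   model = torch.nn.Linear(4, 2).to(device)
#   freeze_params(model)
#   assert all(not p.requires_grad for p in model.parameters())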
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg_error)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating between sources."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Convert a list of Dataset (or IterableDataset) objects into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 317
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 317
| 1
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


# Passwords should contain UPPERCASE, lowercase, numbers, and special characters
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
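# Quick sanity check (illustrative; not part of the original script): the
# strength checker should accept a mixed-class password and reject a
# single-class one of the same length.
#
#   assert is_strong_password("Ab1!Ab1!") is True
#   assert is_strong_password("abcdefgh") is False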
| 102
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 494
| 0
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so that it only steps when the wrapped
    optimizer(s) actually performed a step, e.g. it is skipped while gradients
    are being accumulated or when a mixed-precision step overflowed."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough methods to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
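# Minimal usage sketch (assumes a typical training setup; `model` and the
# training loop are hypothetical, not part of this file):
#
#   from torch.optim import AdamW
#   from torch.optim.lr_scheduler import LambdaLR
#
#   optimizer = AdamW(model.parameters(), lr=1e-4)
#   scheduler = AcceleratedScheduler(
#       LambdaLR(optimizer, lambda step: 1.0), optimizer, step_with_optimizer=True
#   )
#   # inside the training loop, after optimizer.step():
#   scheduler.step()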
| 1
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user-profile dict embedded in one of the page's <script> tags."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user info as a dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 1
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class for a Data2Vec audio model."""

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
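# Illustrative instantiation (a sketch; values shown are the defaults above):
#
#   from transformers import Data2VecAudioConfig, Data2VecAudioModel
#
#   config = Data2VecAudioConfig(hidden_size=768, num_hidden_layers=12)
#   model = Data2VecAudioModel(config)
#   # the feature extractor downsamples raw audio by prod(conv_stride) = 5*2*2*2*2*2*2 = 320
#   assert config.inputs_to_logits_ratio == 320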
| 148
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no equivalent in the Hugging Face model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 709
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase_ : str = """scheduler_config.json"""
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = 1
UpperCamelCase__ = 2
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 5
@dataclass
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = 42
class lowerCamelCase__ :
"""simple docstring"""
UpperCamelCase__ = SCHEDULER_CONFIG_NAME
UpperCamelCase__ = ['''dtype''']
UpperCamelCase__ = []
UpperCamelCase__ = True
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] ,a__ : Dict[str, Any] = None ,a__ : Optional[str] = None ,a__ : Union[str, Any]=False ,**a__ : Tuple ,):
a__ , a__ = cls.load_config(
pretrained_model_name_or_path=a__ ,subfolder=a__ ,return_unused_kwargs=a__ ,**a__ ,)
a__ , a__ = cls.from_config(a__ ,return_unused_kwargs=a__ ,**a__ )
if hasattr(a__ ,"create_state" ) and getattr(a__ ,"has_state" ,a__ ):
a__ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase_ ( self : Any ,a__ : Union[str, os.PathLike] ,a__ : bool = False ,**a__ : Optional[int] ):
self.save_config(save_directory=a__ ,push_to_hub=a__ ,**a__ )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self._get_compatibles()
@classmethod
def lowerCAmelCase_ ( cls : str ):
a__ = list(set([cls.__name__] + cls._compatibles ) )
a__ = importlib.import_module(__name__.split("." )[0] )
a__ = [
getattr(a__ ,a__ ) for c in compatible_classes_str if hasattr(a__ ,a__ )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule that discretizes the given alpha_t_bar function, which defines the
    cumulative product of (1 - beta) over time from t = [0, 1]."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
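# Sanity check (illustrative): the cosine schedule above returns one beta per
# timestep, each clipped to at most max_beta by the min(...) above.
#
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,)
#   assert float(betas.max()) <= 0.999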
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 394
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 598
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 598
| 1
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate the polynomial through (x_points, y_points) at x0
    using Neville's iterated-interpolation scheme."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
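# Worked example (illustrative): the points below lie on the line y = x + 5,
# and Neville's scheme reproduces a polynomial exactly, so the interpolated
# value at x0 = 5 is 10.0.
#
#   neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  # -> 10.0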
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` piecewise over the flattened batch dimensions of `inputs` in
    chunks of `chunk_size`, then stitch the partial outputs back together."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
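# Minimal usage sketch (the layer and shapes are hypothetical, not part of this
# module): run an elementwise layer over a [2, 8, 16] input in chunks of 4
# along the flattened batch dims.
#
#   layer = lambda x: {"out": x * 2}
#   x = torch.randn(2, 8, 16)
#   result = chunk_layer(layer, {"x": x}, chunk_size=4, no_batch_dims=2)
#   assert torch.allclose(result["out"], x * 2)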
class ChunkSizeTuner:
    def __init__(
        self,
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        # Candidate chunk sizes: powers of two up to max_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # Binary search for the largest candidate that runs without a RuntimeError
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First call: nothing is cached yet, so we have to tune
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
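# Illustrative usage sketch (not part of the original module): the tuner
# binary-searches power-of-two chunk sizes and keeps the largest one whose
# trial run does not raise a RuntimeError (e.g. CUDA OOM). The names below
# (fwd, double) are hypothetical:
#
#     tuner = ChunkSizeTuner(max_chunk_size=512)
#     def fwd(x, chunk_size):
#         return chunk_layer(double, {"x": x}, chunk_size=chunk_size, no_batch_dims=1)
#     best = tuner.tune_chunk_size(fwd, (torch.randn(64, 16),), min_chunk_size=1)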
| 6
| 1
|
'''simple docstring'''
from __future__ import annotations
__snake_case ="""Muhammad Umer Farooq"""
__snake_case ="""MIT"""
__snake_case ="""1.0.0"""
__snake_case ="""Muhammad Umer Farooq"""
__snake_case ="""contact@muhammadumerfarooq.me"""
__snake_case ="""Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor #, record it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # Pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # Open each discovered URL
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
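# Illustrative behavior sketch (not in the original file), assuming the usual
# urllib.parse semantics:
#
#     get_sub_domain_name("https://docs.github.com/en")  # -> "docs.github.com"
#     get_domain_name("https://docs.github.com/en")      # -> "github.com"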
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 133
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # Add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
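# Illustrative usage sketch (not part of the original module); the checkpoint
# name below is an assumption for demonstration purposes:
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # "pixel_values" comes from the image processor; "input_ids" and
#     # "attention_mask" come from the tokenizer, merged into one BatchEncoding.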
| 133
| 1
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEfficientFormerModel,
            'image-classification': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, 'encoder_seq_length'):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, 'chunk_length') and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size])

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, 'seq_length', None)
                decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)
        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length])
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 246
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math module value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
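# Illustrative sketch (not in the original file): estimating the integral of
# x**2 over [0, 1], whose exact value is 1/3; the Monte Carlo estimate
# converges toward it as the number of iterations grows.
#
#     estimate = area_under_curve_estimator(100_000, lambda x: x * x)
#     # estimate should be close to 0.3333 for large iteration counts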
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # Quarter-circle of radius 2: its area, pi * r**2 / 4, equals pi
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246
| 1
|