code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
import os
from collections.abc import Mapping
UpperCAmelCase_ : List[str] = tuple[int, int]
class lowerCamelCase_ :
def __init__( self : Dict , __A : set[int] , __A : Mapping[EdgeT, int] ):
__A : set[int] = vertices
__A : dict[EdgeT, int] = {
(min(__A ), max(__A )): weight for edge, weight in edges.items()
}
def lowerCAmelCase_ ( self : Union[str, Any] , __A : EdgeT , __A : int ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__A : str = weight
def lowerCAmelCase_ ( self : Any ):
__A : Graph = Graph({min(self.vertices )} , {} )
__A : EdgeT
__A : int
__A : EdgeT
__A : int
while len(subgraph.vertices ) < len(self.vertices ):
__A : Tuple = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__A : Optional[Any] = edge
__A : Tuple = weight
subgraph.add_edge(__A , __A )
return subgraph
def __SCREAMING_SNAKE_CASE ( a__ : str = "p107_network.txt" ) -> int:
__A : str = os.path.abspath(os.path.dirname(a__ ) )
__A : str = os.path.join(a__ ,a__ )
__A : dict[EdgeT, int] = {}
__A : list[str]
__A : int
__A : int
with open(a__ ) as f:
__A : Any = f.read().strip().split("""\n""" )
__A : List[str] = [line.split(""",""" ) for line in data]
for edgea in range(1 ,len(a__ ) ):
for edgea in range(a__ ):
if adjaceny_matrix[edgea][edgea] != "-":
__A : Optional[int] = int(adjaceny_matrix[edgea][edgea] )
__A : Graph = Graph(set(range(len(a__ ) ) ) ,a__ )
__A : Graph = graph.prims_algorithm()
__A : int = sum(graph.edges.values() )
__A : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
SCREAMING_SNAKE_CASE : Optional[Any] = False
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self, _lowercase=32 ) -> Dict:
set_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDModel(sample_size=_lowercase, in_channels=3, out_channels=3 )
SCREAMING_SNAKE_CASE_ = torch.optim.SGD(model.parameters(), lr=0.0_001 )
return model, optimizer
@slow
def a__ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE_ = DDPMScheduler(
num_train_timesteps=1000, beta_start=0.0_001, beta_end=0.02, beta_schedule='linear', clip_sample=_lowercase, )
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=1000, beta_start=0.0_001, beta_end=0.02, beta_schedule='linear', clip_sample=_lowercase, )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
SCREAMING_SNAKE_CASE_ = [torch.randn((4, 3, 32, 32) ).clip(-1, 1 ).to(_lowercase ) for _ in range(4 )]
SCREAMING_SNAKE_CASE_ = [torch.randn((4, 3, 32, 32) ).to(_lowercase ) for _ in range(4 )]
SCREAMING_SNAKE_CASE_ = [torch.randint(0, 1000, (4,) ).long().to(_lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_model_optimizer(resolution=32 )
model.train().to(_lowercase )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE_ = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i] )
SCREAMING_SNAKE_CASE_ = model(_lowercase, timesteps[i] ).sample
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowercase, noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_model_optimizer(resolution=32 )
model.train().to(_lowercase )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE_ = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i] )
SCREAMING_SNAKE_CASE_ = model(_lowercase, timesteps[i] ).sample
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowercase, noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_lowercase, _lowercase, atol=1E-5 ) )
self.assertTrue(torch.allclose(_lowercase, _lowercase, atol=1E-5 ) )
| 715 |
'''simple docstring'''
class snake_case :
"""simple docstring"""
def __init__( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = {}
def a__ ( self, _lowercase ) -> int:
if vertex not in self.adjacency:
SCREAMING_SNAKE_CASE_ = {}
self.num_vertices += 1
def a__ ( self, _lowercase, _lowercase, _lowercase ) -> List[str]:
self.add_vertex(_lowercase )
self.add_vertex(_lowercase )
if head == tail:
return
SCREAMING_SNAKE_CASE_ = weight
SCREAMING_SNAKE_CASE_ = weight
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge
edges.remove((tail, head, weight) )
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE_ = list(edges[i] )
edges.sort(key=lambda _lowercase : e[2] )
for i in range(len(_lowercase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
SCREAMING_SNAKE_CASE_ = edges[i][2] + 1
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge
SCREAMING_SNAKE_CASE_ = weight
SCREAMING_SNAKE_CASE_ = weight
def __str__( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
SCREAMING_SNAKE_CASE_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def a__ ( _lowercase=None, _lowercase=None ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = Graph()
if vertices is None:
SCREAMING_SNAKE_CASE_ = []
if edges is None:
SCREAMING_SNAKE_CASE_ = []
for vertex in vertices:
g.add_vertex(_lowercase )
for edge in edges:
g.add_edge(*_lowercase )
return g
class snake_case :
"""simple docstring"""
def __init__( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
def __len__( self ) -> Any:
return len(self.parent )
def a__ ( self, _lowercase ) -> Any:
if item in self.parent:
return self.find(_lowercase )
SCREAMING_SNAKE_CASE_ = item
SCREAMING_SNAKE_CASE_ = 0
return item
def a__ ( self, _lowercase ) -> List[str]:
if item not in self.parent:
return self.make_set(_lowercase )
if item != self.parent[item]:
SCREAMING_SNAKE_CASE_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self, _lowercase, _lowercase ) -> Any:
SCREAMING_SNAKE_CASE_ = self.find(_lowercase )
SCREAMING_SNAKE_CASE_ = self.find(_lowercase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
SCREAMING_SNAKE_CASE_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
SCREAMING_SNAKE_CASE_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
SCREAMING_SNAKE_CASE_ = roota
return roota
return None
@staticmethod
def a__ ( _lowercase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = graph.num_vertices
SCREAMING_SNAKE_CASE_ = Graph.UnionFind()
SCREAMING_SNAKE_CASE_ = []
while num_components > 1:
SCREAMING_SNAKE_CASE_ = {}
for vertex in graph.get_vertices():
SCREAMING_SNAKE_CASE_ = -1
SCREAMING_SNAKE_CASE_ = graph.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge
SCREAMING_SNAKE_CASE_ = union_find.find(_lowercase )
SCREAMING_SNAKE_CASE_ = union_find.find(_lowercase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cheap_edge[vertex]
if union_find.find(_lowercase ) != union_find.find(_lowercase ):
union_find.union(_lowercase, _lowercase )
mst_edges.append(cheap_edge[vertex] )
SCREAMING_SNAKE_CASE_ = num_components - 1
SCREAMING_SNAKE_CASE_ = Graph.build(edges=_lowercase )
return mst
| 238 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'trocr'
A__ : Any = ['past_key_values']
A__ : Any = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , _snake_case=50265 , _snake_case=1024 , _snake_case=12 , _snake_case=16 , _snake_case=4096 , _snake_case="gelu" , _snake_case=512 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=2 , _snake_case=0.02 , _snake_case=0.0 , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=True , _snake_case=1 , _snake_case=0 , _snake_case=2 , **_snake_case , ) -> str:
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Any = d_model
_UpperCamelCase : List[Any] = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[Any] = activation_function
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : Dict = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : Optional[Any] = activation_dropout
_UpperCamelCase : Optional[Any] = init_std
_UpperCamelCase : Any = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = scale_embedding
_UpperCamelCase : str = use_learned_position_embeddings
_UpperCamelCase : Tuple = layernorm_embedding
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , **_snake_case , )
| 683 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if height or width lower than this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
assert number_char > 1, "The number of character should greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 1 |
from math import factorial
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = real
if isinstance(_a, _a ):
__SCREAMING_SNAKE_CASE = [1] * rank
else:
__SCREAMING_SNAKE_CASE = rank
def __repr__( self ) -> Optional[Any]:
return (
f'''{self.real}+'''
f'''{"+".join(str(_a )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real, _a )
def __add__( self, _a ) -> Tuple:
if not isinstance(_a, _a ):
return Dual(self.real + other, self.duals )
__SCREAMING_SNAKE_CASE = self.duals.copy()
__SCREAMING_SNAKE_CASE = other.duals.copy()
if len(_a ) > len(_a ):
o_dual.extend([1] * (len(_a ) - len(_a )) )
elif len(_a ) < len(_a ):
s_dual.extend([1] * (len(_a ) - len(_a )) )
__SCREAMING_SNAKE_CASE = []
for i in range(len(_a ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real, _a )
SCREAMING_SNAKE_CASE__ =__add__
def __sub__( self, _a ) -> Any:
return self + other * -1
def __mul__( self, _a ) -> Dict:
if not isinstance(_a, _a ):
__SCREAMING_SNAKE_CASE = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other, _a )
__SCREAMING_SNAKE_CASE = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real, _a )
SCREAMING_SNAKE_CASE__ =__mul__
def __truediv__( self, _a ) -> Any:
if not isinstance(_a, _a ):
__SCREAMING_SNAKE_CASE = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other, _a )
raise ValueError
def __floordiv__( self, _a ) -> Dict:
if not isinstance(_a, _a ):
__SCREAMING_SNAKE_CASE = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other, _a )
raise ValueError
def __pow__( self, _a ) -> Any:
if n < 0 or isinstance(_a, _a ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
__SCREAMING_SNAKE_CASE = self
for _ in range(n - 1 ):
x *= self
return x
def _A ( __snake_case :Dict , __snake_case :Optional[Any] , __snake_case :int ) -> str:
"""simple docstring"""
if not callable(__snake_case ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__snake_case , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(__snake_case , __snake_case ):
raise ValueError("differentiate() requires an int as input for order" )
__SCREAMING_SNAKE_CASE = Dual(__snake_case , 1 )
__SCREAMING_SNAKE_CASE = func(__snake_case )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
| 714 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ) -> Optional[int]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , __snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ) -> Any:
"""simple docstring"""
assert _test_patching.open is open
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , __snake_case ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , __snake_case ):
pass
def _A ( ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , __snake_case ) is None
with patch_submodule(_test_patching , "len" , __snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_start_and_stop_mock__"
__SCREAMING_SNAKE_CASE = patch_submodule(_test_patching , "open" , __snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ) -> str:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_successive_join__"
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_successive_dirname__"
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , __snake_case ):
with patch_submodule(_test_patching , "os.rename" , __snake_case ):
with patch_submodule(_test_patching , "os.path.dirname" , __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , __snake_case ):
with patch_submodule(_test_patching , "os.path.join" , __snake_case ):
with patch_submodule(_test_patching , "os.path.dirname" , __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , __snake_case ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , __snake_case ):
pass
| 214 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCAmelCase__ ( a__: str , a__: str , a__: Optional[str] = None ) -> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase = quote(a__ )
return hfh.hf_hub_url(a__ , a__ , repo_type='dataset' , revision=a__ )
| 618 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ :Tuple = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = ['''DeiTFeatureExtractor''']
lowerCAmelCase__ :Union[str, Any] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Tuple = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 618 | 1 |
from __future__ import annotations
from typing import Any
def __snake_case ( _UpperCamelCase ) -> int:
if not postfix_notation:
return 0
_a = {'''+''', '''-''', '''*''', '''/'''}
_a = []
for token in postfix_notation:
if token in operations:
_a , _a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_UpperCamelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase :Dict = random.Random()
def __snake_case ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ) -> Optional[int]:
if rng is None:
_a = global_rng
_a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self: Tuple , __UpperCamelCase: Dict , __UpperCamelCase: int=7 , __UpperCamelCase: Any=400 , __UpperCamelCase: List[str]=2000 , __UpperCamelCase: Union[str, Any]=2048 , __UpperCamelCase: int=128 , __UpperCamelCase: Optional[int]=1 , __UpperCamelCase: Tuple=512 , __UpperCamelCase: List[Any]=30 , __UpperCamelCase: Dict=4_4100 , ):
_a = parent
_a = batch_size
_a = min_seq_length
_a = max_seq_length
_a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a = spectrogram_length
_a = feature_size
_a = num_audio_channels
_a = hop_length
_a = chunk_length
_a = sampling_rate
def _A ( self: int ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _A ( self: List[Any] , __UpperCamelCase: List[Any]=False , __UpperCamelCase: List[str]=False ):
def _flatten(__UpperCamelCase: Tuple ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Feature-extraction tests for TVLT.

    NOTE(review): restored from a mangled source in which every method was
    named `_A` (so later defs shadowed earlier ones and no `test_*` method
    could ever be collected) and the tester built in setUp was discarded.
    """

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        """The extractor exposes all of its configuration attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained round-trips the configuration."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # mel_filters is a float array: compare with allclose, not dict equality.
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        """to_json_file / from_json_file round-trips the configuration."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        """Load the first *num_samples* waveforms from the dummy LibriSpeech set."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        # assertEquals is a deprecated alias; assertEqual is the supported spelling.
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        # Expected slice values carried over unchanged from the source.
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))


# Legacy mangled alias.
UpperCAmelCase = TvltFeatureExtractionTest
| 346 | 1 |
import argparse
import os
import re
import packaging.version
# Version-bump utility: rewrites the version string in setup.py, the library
# __init__.py, the example scripts and the README for (pre/post) releases.
PATH_TO_EXAMPLES = "examples/"
# Maps a short pattern name to (compiled regex, replacement template); the
# literal "VERSION" in each template is substituted with the target version.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files rewritten on every release, keyed by the pattern used to edit them.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in *fname* using REPLACE_PATTERNS[pattern]."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned ``check_min_version`` in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; example pins are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point model-doc links in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the model list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the library ``__init__.py``."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump to the release version, asking the user to confirm it first."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Bump to the next ``.dev0`` version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 35 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Assemble the kwargs dict consumed by TFOPTModel / TFOPTForCausalLM.

    When no attention mask is given, every non-pad token attends.
    `head_mask` is accepted for signature parity with sibling helpers but is
    unused here.
    """
    if attention_mask is None:
        # `tf.inta` in the mangled source; int8 is the smallest integer dtype
        # for a 0/1 mask — confirm against upstream if exact dtype matters.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


# Legacy mangled alias.
A = prepare_opt_inputs_dict
@require_tf
class TFOPTModelTester:
    """Builds tiny OPT configs and inputs for the TF model tests below.

    NOTE(review): restored from a mangled source where `__init__` had
    duplicate parameter names (a SyntaxError) and the class attributes all
    collided on `a__`. Numeric defaults kept byte-for-byte.
    """

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        """Return a small OPTConfig plus a matching inputs dict (ids end in EOS)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Cached (past_key_values) decoding must match the uncached forward pass."""
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        # `tf.inta` in the mangled source; int8 is the usual mask dtype — confirm.
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-test suite for OPT.

    NOTE(review): restored from a mangled source in which all class attributes
    collided on `a__` and the nested helper took two parameters with the same
    name (a SyntaxError).
    """

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            """Return the embedding weight, building the model first if needed."""
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """Wrap nested token-id lists in an integer tf.constant.

    NOTE(review): the mangled source said `tf.intaa`, which does not exist;
    int32 is assumed for token ids — confirm against upstream.
    """
    return tf.constant(tok_lst, dtype=tf.int32)


# Legacy mangled alias.
A = _long_tensor
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """Builds a tiny OPT config plus inputs for head-level tests."""

    vocab_size = 99

    def _get_config_and_data(self):
        # Force every sequence to end with the EOS token (id 2).
        # `tf.intaa` in the mangled source; int32 assumed for token ids — confirm.
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    """Slow integration checks against facebook/opt-350m."""

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        # Expected slice values carried over unchanged from the source.
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        # The XLA-compiled forward must agree (looser tolerance).
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    """Slow logit-level checks for facebook/opt-350m (eager and XLA)."""

    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        # Expected mean-logit values carried over unchanged from the source.
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    """Slow greedy-generation checks (max_length=10) for the OPT checkpoints."""

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        # Shorten max_length for the padded prompt by its padding amount.
        # `tf.intaa` in the mangled source; int64 assumed so the arithmetic
        # stays integer-typed — confirm against upstream.
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 584 | 0 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Module logger plus the default inference device (GPU when available).
# The mangled source assigned both to the same name, so the `logger` and
# `DEFAULT_DEVICE` referenced below never existed.
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
UpperCamelCase = DEFAULT_DEVICE  # legacy mangled alias
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fpaa=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run *model_name* over *examples* and stream one prediction per line to *out_file*.

    Args:
        examples: list of input texts.
        out_file: path the hypotheses are written to (one per line).
        model_name: checkpoint id or local path.
        batch_size: number of examples per generate() call.
        device: torch device string; defaults to the module-level DEFAULT_DEVICE.
        fpaa: run the model in half precision ("fpaa" is this file's spelling of
            fp16, kept so the call in run_generate keeps working).
        task: used to pull task-specific generation params from the model config.
        prefix: optional string prepended to every example (e.g. for T5).
        **generate_kwargs: forwarded verbatim to model.generate().

    Returns:
        dict with n_obs, runtime (whole seconds) and seconds_per_sample.
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """CLI entry point: generate predictions for input_path and (optionally) score them.

    Unknown ``--key=value`` flags are parsed by parse_numeric_n_bool_cl_kwargs
    and forwarded to model.generate(). Returns the metrics dict, or {} when no
    --reference_path is given.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5 checkpoints expect a leading space on every example.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
    # CLI wrapper; all real behavior lives in run_generate().
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 715 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` (i < j) with ``nums[i] + nums[j] == target``.

    Assumes *nums* is sorted ascending; returns ``[]`` when no pair exists
    (including for empty or single-element input).

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # sum too small: move the left pointer right
        else:
            j -= 1  # sum too large: move the right pointer left
    return []
if __name__ == "__main__":
    # Execute any doctests defined in this module, then print a demo call.
    import doctest

    doctest.testmod()
    # NOTE: the `=` f-string spec echoes the expression text itself; keep the
    # expression unchanged or the printed output changes.
    print(f"""{two_pointer([2, 7, 1_1, 1_5], 9) = }""")
| 144 | 0 |
'''simple docstring'''
# Lookup table mapping a remainder 0-15 to its hexadecimal digit.
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
__lowerCamelCase = values  # legacy mangled alias


def UpperCAmelCase_(decimal):
    """Convert an integer-valued number to a hex string like the builtin ``hex``.

    Raises AssertionError for non-numeric or non-integral input.

    >>> UpperCAmelCase_(255)
    '0xff'
    >>> UpperCAmelCase_(-256)
    '-0x100'
    >>> UpperCAmelCase_(0)
    '0x0'
    """
    assert isinstance(decimal, (int, float)) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    if hexadecimal == "":
        hexadecimal = "0"  # bug fix: input 0 previously produced "0x"
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
    # Execute any doctests defined in this module.
    import doctest

    doctest.testmod()
| 310 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True iff every bracket in *s* is matched and properly nested.

    Non-bracket characters are ignored; the empty string is balanced.
    """
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            # Closing bracket with nothing open, or the wrong opener on top.
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


# Legacy mangled alias.
UpperCAmelCase_ = is_balanced
def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 310 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_conflict_times_out(tmpdir):
    """While one FileLock holds the lock file, a second acquire must raise Timeout.

    NOTE(review): in the mangled source both locks were bound to the same
    variable, so the second acquire was a reentrant no-op and Timeout could
    never fire; pytest.raises was also given the wrong exception type.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # The failed acquire must have waited at least the requested timeout.
        assert time.time() - _start > timeout
def lowerCamelCase__(tmpdir: str):
    """FileLock must truncate an overlong lock-file name to a usable (<=255 char) path.

    Bug fix: restored the ``tmpdir`` parameter name (pytest fixtures are injected
    by name; the body already referenced ``tmpdir``) and the mangled locals.
    NOTE(review): this definition shares its name with the test above — upstream
    these are distinct test functions; verify the intended names.
    """
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # The lock path keeps its ".lock" suffix but not the full oversized name.
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            # Both locks must resolve to the same truncated file.
            lock2.acquire(0)
| 279 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make torch/cudnn deterministic so pipeline outputs are reproducible in tests.
enable_full_determinism()
class _lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    """Fast (tiny-model) tests for StableUnCLIPPipeline.

    NOTE(review): the base classes ``__a`` and several names used below
    (``_UpperCamelCase``, ``embedder_hidden_size``, ``embedder_projection_dim``,
    ``components``, ``inputs``) look like mangled placeholders that no longer
    resolve — verify every identifier against the upstream diffusers test file.
    """

    # Pipeline class under test and the standard parameter sets for the mixins.
    _lowercase =StableUnCLIPPipeline
    _lowercase =TEXT_TO_IMAGE_PARAMS
    _lowercase =TEXT_TO_IMAGE_BATCH_PARAMS
    _lowercase =TEXT_TO_IMAGE_IMAGE_PARAMS
    _lowercase =TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _lowercase =False

    def __a ( self ) -> Dict:
        # Build the dict of tiny, seeded pipeline components used by every test.
        lowerCAmelCase_ = 32
        lowerCAmelCase_ = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        lowerCAmelCase_ = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCamelCase , projection_dim=_UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        lowerCAmelCase_ = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_UpperCamelCase , num_layers=1 , )
        torch.manual_seed(0 )
        lowerCAmelCase_ = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=_UpperCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCAmelCase_ = StableUnCLIPImageNormalizer(embedding_dim=_UpperCamelCase )
        lowerCAmelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        lowerCAmelCase_ = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        lowerCAmelCase_ = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_UpperCamelCase , layers_per_block=1 , upcast_attention=_UpperCamelCase , use_linear_projection=_UpperCamelCase , )
        torch.manual_seed(0 )
        lowerCAmelCase_ = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        lowerCAmelCase_ = AutoencoderKL()
        lowerCAmelCase_ = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def __a ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Tuple:
        # Deterministic generator plus the minimal call kwargs for one pipeline run.
        if str(_UpperCamelCase ).startswith("mps" ):
            lowerCAmelCase_ = torch.manual_seed(_UpperCamelCase )
        else:
            lowerCAmelCase_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        lowerCAmelCase_ = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def __a ( self ) -> List[Any]:
        # Attention slicing must not change outputs; exact comparison only on CPU.
        lowerCAmelCase_ = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=_UpperCamelCase )

    def __a ( self ) -> str:
        # Batched and single inference must agree; exact comparison on cpu/mps only.
        lowerCAmelCase_ = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=_UpperCamelCase )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """GPU integration tests for StableUnCLIPPipeline (run only with --slow on CUDA).

    NOTE(review): locals referenced below (``pipe``, ``output``, ``image``,
    ``mem_bytes``, ``_UpperCamelCase``) do not match the mangled assignment
    targets (``lowerCAmelCase_``) — verify against the upstream test file.
    """

    def __a ( self ) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self ) -> Optional[int]:
        # Full fp16 text-to-image run compared against a stored reference image.
        lowerCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        lowerCAmelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCAmelCase_ = pipe("anime turle" , generator=_UpperCamelCase , output_type="np" )
        lowerCAmelCase_ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )

    def __a ( self ) -> Optional[Any]:
        # The memory-savings path must keep peak VRAM under ~7 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCAmelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        lowerCAmelCase_ = pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase_ = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        lowerCAmelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 279 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow T5 checkpoint into a PyTorch model directory.

    Bug fixes: the three parameters were all named ``__a`` (a SyntaxError —
    duplicate argument names), so they are restored to distinct names matching
    how the argparse caller passes them (checkpoint path, config file, output
    dir); the unresolvable ``List[Any]`` return annotation is dropped (nothing
    is returned and ``List`` is not imported here).
    """
    # Initialise an (untrained) PyTorch model from the JSON architecture config.
    config = TaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point: parse the three required paths and convert.
    # NOTE(review): the parser is assigned to ``__A`` but used as ``parser``/
    # ``args``, and ``convert_tf_checkpoint_to_pytorch`` is not defined in this
    # file (the converter above is ``lowerCAmelCase_``) — running this block
    # raises NameError.  Verify the names against the upstream script.
    __A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __A = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 59 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
# Module-level logger for this pipeline file.
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    '''Zero-shot image-classification pipeline.

    Scores an image against arbitrary candidate text labels with a CLIP-style
    model, returning softmax-normalised {score, label} dicts sorted best-first.

    NOTE(review): the decorator argument and base class are the mangled name
    ``__SCREAMING_SNAKE_CASE`` (which this module binds to the logger), and
    several locals below (``kwargs``, ``preprocess_params``, ``image``,
    ``outputs``, ``logits``, ``probs``, ``scores``) do not match the mangled
    assignment targets — verify every identifier against the upstream
    transformers pipeline before relying on this block.
    '''

    def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]:
        '''Require the vision backend and restrict to zero-shot image-classification models.'''
        super().__init__(**UpperCAmelCase_)
        requires_backends(self , "vision")
        # Pick the TF or PT model mapping depending on the active framework.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : List[Any]) ->Tuple:
        '''Run the pipeline on one image (or a batch of images).'''
        return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Any:
        '''Split call kwargs into (preprocess, forward, postprocess) parameter dicts.'''
        lowerCamelCase__: Optional[int] ={}
        if "candidate_labels" in kwargs:
            lowerCamelCase__: Tuple =kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            lowerCamelCase__: Tuple =kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}.") ->str:
        '''Encode the image and render each candidate label through the hypothesis template.'''
        lowerCamelCase__: int =load_image(UpperCAmelCase_)
        lowerCamelCase__: Any =self.image_processor(images=[image] , return_tensors=self.framework)
        lowerCamelCase__: Any =candidate_labels
        # One hypothesis sentence per candidate label.
        lowerCamelCase__: List[str] =[hypothesis_template.format(UpperCAmelCase_) for x in candidate_labels]
        lowerCamelCase__: int =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_)
        lowerCamelCase__: str =[text_inputs]
        return inputs

    def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Tuple:
        '''Forward pass: produce per-image logits over the candidate labels.'''
        lowerCamelCase__: int =model_inputs.pop("candidate_labels")
        lowerCamelCase__: List[str] =model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0] , UpperCAmelCase_):
            lowerCamelCase__: List[Any] =text_inputs[0]
        else:
            # Batching case.
            lowerCamelCase__: List[Any] =text_inputs[0][0]
        lowerCamelCase__: List[str] =self.model(**UpperCAmelCase_ , **UpperCAmelCase_)
        lowerCamelCase__: str ={
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->int:
        '''Softmax the logits and return {score, label} dicts sorted by descending score.'''
        lowerCamelCase__: List[Any] =model_outputs.pop("candidate_labels")
        lowerCamelCase__: Optional[int] =model_outputs["logits"][0]
        if self.framework == "pt":
            lowerCamelCase__: Optional[Any] =logits.softmax(dim=-1).squeeze(-1)
            lowerCamelCase__: Optional[Any] =probs.tolist()
            if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
                lowerCamelCase__: Optional[int] =[scores]
        elif self.framework == "tf":
            lowerCamelCase__: List[str] =stable_softmax(UpperCAmelCase_ , axis=-1)
            lowerCamelCase__: Optional[int] =probs.numpy().tolist()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        lowerCamelCase__: Optional[int] =[
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda UpperCAmelCase_: -x[0])
        ]
        return result
| 59 | 1 |
# Size of the alphabet used by the rolling hash (extended-ASCII byte values).
snake_case = 2_5_6
# Modulus to hash a string
# NOTE(review): this rebinds the same name as the 256 above, so the alphabet
# size is lost at module level — verify the intended constant names upstream.
snake_case = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE__(pattern: str, text: str) -> bool:
    """Return True iff *pattern* occurs in *text* (Rabin-Karp rolling hash).

    Bug fixes: the two parameters were both named ``snake_case__`` (a
    SyntaxError — duplicate argument names) and every local had been collapsed
    to one placeholder; the hash constants are defined locally because the
    module-level ones were clobbered (both bound to the same name).
    """
    # Hash parameters: alphabet size and a large prime-ish modulus.
    alphabet_size = 256
    modulus = 1000003

    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and first substring of text;
    # modulus_power ends as alphabet_size**(p_len - 1) % modulus.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        # Confirm a hash hit with a direct comparison (hashes can collide).
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Roll the hash one character to the right
        # (https://en.wikipedia.org/wiki/Rolling_hash).
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def SCREAMING_SNAKE_CASE__( ) -> None:
    """Self-test: exercise the Rabin-Karp matcher on known positive/negative cases."""
    # NOTE(review): this zero-argument definition shadows the matcher above
    # (same name), and ``rabin_karp`` / ``test_rabin_karp`` are not defined in
    # this file, so the assertions and the __main__ call below raise NameError.
    # Verify against the upstream source.
    # Test 1) pattern present twice / pattern absent
    _lowercase = '''abc1abc12'''
    _lowercase = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    _lowercase = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ )
    # Test 2)
    _lowercase = '''ABABX'''
    _lowercase = '''ABABZABABYABABX'''
    assert rabin_karp(a_ , a_ )
    # Test 3)
    _lowercase = '''AAAB'''
    _lowercase = '''ABAAAAAB'''
    assert rabin_karp(a_ , a_ )
    # Test 4)
    _lowercase = '''abcdabcy'''
    _lowercase = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(a_ , a_ )
    # Test 5) non-ASCII characters must hash correctly too
    _lowercase = '''Lü'''
    _lowercase = '''Lüsai'''
    assert rabin_karp(a_ , a_ )
    _lowercase = '''Lue'''
    assert not rabin_karp(a_ , a_ )
    print('Success.' )


if __name__ == "__main__":
    test_rabin_karp()
import string
import numpy
def SCREAMING_SNAKE_CASE__(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (Euclid's algorithm).

    Bug fixes: the parameters were both named ``snake_case__`` (a SyntaxError —
    duplicate argument names) and the recursive call targeted the undefined
    name ``greatest_common_divisor``; it now recurses through this function.
    """
    return b if a == 0 else SCREAMING_SNAKE_CASE__(b % a, a)
class A_:
    """Hill cipher over the 36-character alphabet A-Z0-9 (arithmetic modulo 36).

    Reconstruction of the mangled original: all three class attributes shared
    one name and all five methods shared another, so no internal reference
    (``self.key_string``, ``self.replace_letters``, ...) resolved.  Attribute
    and method names are restored to the ones the bodies already use.
    """

    # This cipher takes alphanumerics into account, i.e. a total of 36 characters.
    key_string = string.ascii_uppercase + string.digits
    # Element-wise "x mod 36" over numpy arrays.
    modulus = numpy.vectorize(lambda x: x % 36)
    # Element-wise rounding to the nearest integer.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """Store the mod-36-reduced key matrix after validating its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        # Plaintext is processed in blocks of the key's dimension.
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map an alphabet character to its numeric value in [0, 35]."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric value in [0, 35] back to its alphabet character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (invertible mod 36)."""
        # Local import: the module-level gcd helper's name is unreliable here.
        from math import gcd

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            raise ValueError(
                f"""determinant modular {req_l} of encryption key({det}) """
                f"""is not co prime w.r.t {req_l}.\nTry another key."""
            )

    def process_text(self, text: str) -> str:
        """Uppercase, keep only alphabet chars, pad with the last char to a block multiple.

        Note: raises IndexError for text with no alphabet characters (upstream
        behaviour preserved).
        """
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt *text* block-by-block: c = K @ p (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the inverse of the key matrix modulo 36.

        Uses det_inv * det * inv(K) = det_inv * adj(K), which is integral, so
        rounding after the float inversion recovers exact values.
        """
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Brute-force the multiplicative inverse of det modulo 36.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        decrypt_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(decrypt_key))

    def decrypt(self, text: str) -> str:
        """Decrypt *text* block-by-block: p = K^-1 @ c (mod 36)."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def SCREAMING_SNAKE_CASE__( ) -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt user text."""
    # NOTE(review): this definition shadows the gcd helper above (same name) and
    # references ``HillCipher``, ``hill_matrix``, ``hc`` and ``option`` which
    # are not defined here (the cipher class is ``A_``), so running it raises
    # NameError.  Verify the names against the upstream script.
    _lowercase = int(input('Enter the order of the encryption key: ' ) )
    _lowercase = []
    print('Enter each row of the encryption key with space separated integers' )
    for _ in range(snake_case__ ):
        _lowercase = [int(snake_case__ ) for x in input().split()]
        hill_matrix.append(snake_case__ )
    _lowercase = HillCipher(numpy.array(snake_case__ ) )
    print('Would you like to encrypt or decrypt some text? (1 or 2)' )
    _lowercase = input('\n1. Encrypt\n2. Decrypt\n' )
    if option == "1":
        _lowercase = input('What text would you like to encrypt?: ' )
        print('Your encrypted text is:' )
        print(hc.encrypt(snake_case__ ) )
    elif option == "2":
        _lowercase = input('What text would you like to decrypt?: ' )
        print('Your decrypted text is:' )
        print(hc.decrypt(snake_case__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class A__ ( unittest.TestCase ):
    """Unit tests for diffusers' ``get_activation`` factory.

    Each test checks that the factory returns the expected ``torch.nn`` module
    and that the activation maps sentinel inputs (large negative -> 0, zero ->
    0, 20 -> 20) as expected, while -1 must not map exactly to 0.

    NOTE(review): the asserts reference ``lowerCamelCase__`` and ``act`` while
    the activation is assigned to ``a__`` — mangled names; verify upstream.
    """

    def _UpperCamelCase( self : List[Any] ):
        # "swish" must resolve to nn.SiLU with the expected saturation behaviour.
        a__ : Tuple = get_activation("swish" )
        self.assertIsInstance(lowerCamelCase__ , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def _UpperCamelCase( self : str ):
        # "silu" is an alias that must also resolve to nn.SiLU.
        a__ : Optional[Any] = get_activation("silu" )
        self.assertIsInstance(lowerCamelCase__ , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def _UpperCamelCase( self : Optional[Any] ):
        # "mish" must resolve to nn.Mish (saturation checked at -200).
        a__ : Optional[int] = get_activation("mish" )
        self.assertIsInstance(lowerCamelCase__ , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def _UpperCamelCase( self : str ):
        # "gelu" must resolve to nn.GELU.
        a__ : Dict = get_activation("gelu" )
        self.assertIsInstance(lowerCamelCase__ , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 37 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
# Make torch/cudnn deterministic so pipeline outputs are reproducible in tests.
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast (tiny-model) tests for StableDiffusionControlNetImgaImgPipeline with a
    single ControlNet.

    NOTE(review): the base-class name ``UpperCamelCase_`` and the argument name
    ``lowerCAmelCase`` used as boolean flags below are mangled placeholders —
    verify against the upstream diffusers test file.
    """

    # Pipeline class under test and its standard mixin parameter sets.
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def UpperCamelCase_ ( self ) -> str:
        # Build the dict of tiny, seeded components (unet, controlnet, vae, ...).
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
        # Deterministic generator, a random control image and a derived input image.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= 2
        SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
        SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: Tuple= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_ ( self ) -> Tuple:
        # Attention slicing must not change outputs beyond a small tolerance.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xFormers attention must match the default attention within tolerance.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> str:
        # Batched and single inference must agree within tolerance.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast (tiny-model) tests for StableDiffusionControlNetImgaImgPipeline with a
    MultiControlNetModel (two ControlNets).

    NOTE(review): this class reuses the name of the single-ControlNet test class
    above (shadowing it), and ``UpperCamelCase_`` / ``lowerCAmelCase`` are
    mangled placeholders — verify against the upstream diffusers test file.
    """

    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def UpperCamelCase_ ( self ) -> Dict:
        # Build tiny components; the two controlnets get normal-initialised
        # down-blocks so their outputs differ from the default zero-init.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(lowerCAmelCase ):
            # Re-initialise conv weights so the zero-initialised controlnet
            # projections actually contribute to the output.
            if isinstance(lowerCAmelCase , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
        SCREAMING_SNAKE_CASE__: int= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
        # Deterministic generator, one control image per controlnet, and an input image.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= 2
        SCREAMING_SNAKE_CASE__: Tuple= [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
        ]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: int= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Different control_guidance_start/end settings must produce different images.
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= 10.0
        SCREAMING_SNAKE_CASE__: Any= 4
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
        SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3

    def UpperCamelCase_ ( self ) -> int:
        # Attention slicing must not change outputs beyond a small tolerance.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xFormers attention must match the default attention within tolerance.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # Batched and single inference must agree within tolerance.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # save_pretrained is expected to be unimplemented for Multi-ControlNet.
        SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """GPU integration test for ControlNet-guided img2img (canny conditioning).

    NOTE(review): this class reuses the mangled name of the fast test classes
    above, and locals referenced below (``pipe``, ``output``, ``image``) do not
    match the assignment targets — verify against the upstream test file.
    """

    def UpperCamelCase_ ( self ) -> Dict:
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self ) -> Tuple:
        # Full SD1.5 + canny-ControlNet img2img run, compared to a stored reference.
        SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
        SCREAMING_SNAKE_CASE__: List[str]= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: List[Any]= load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
            lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
        assert image.shape == (512, 512, 3)
        SCREAMING_SNAKE_CASE__: str= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        # Pixel-wise difference against the stored reference must stay small.
        assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __A(snake_case__):
    """Configuration for a UniSpeech-SAT model.

    Holds the hyper-parameters of the encoder, the convolutional feature
    extractor, SpecAugment masking, codevector quantization, CTC, sequence
    classification and the XVector head.  Defaults mirror
    `microsoft/unispeech-sat-base-100h-libri-ft`.

    NOTE(review): the original (obfuscated) source declared every parameter
    with the same mangled name, which is a SyntaxError in Python, and bound
    the values to a throwaway local instead of `self`; the parameter names
    below are reconstructed from the attribute reads in the body.
    """

    # Identifier used by the auto-config machinery.
    __lowercase: str = """unispeech-sat"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy the conv specs so later in-place edits cannot alias the defaults.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def lowerCAmelCase(self):
        """Product of the conv strides: total input-to-logits downsampling ratio."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 2 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 2 | 1 |
"""Lazy-import initializer for the BioGPT model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

# The modeling submodule is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access.  (The original assigned the proxy to
    # a throwaway name instead of installing it into sys.modules.)
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case(_a):
    """Processor wrapping a ViT image processor (images and visual prompts) and a
    CLIP tokenizer (text) behind a single ``__call__``.

    NOTE(review): the original (obfuscated) source gave all three class attributes
    one shared name and all ``__call__`` parameters one shared name (a SyntaxError);
    the names below are reconstructed from how the values are used in the body.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Back-compat: accept the deprecated `feature_extractor` kwarg.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """Encode any combination of text, images and visual prompts.

        Exactly one of text / visual_prompt may be given; at least one of the
        three inputs is required.  Returns tokenizer output, image-processor
        output, or a merged dict depending on which inputs were supplied.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias for image_processor.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 143 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_(unittest.TestCase):
    """Fast, CPU-friendly tests for PNDMPipeline using a tiny dummy UNet."""

    @property
    def dummy_uncond_unet(self):
        """A small deterministic UNet2DModel for quick unconditional generation."""
        # The original gave this property and the test method the same mangled
        # name, so the property `self.dummy_uncond_unet` read below never existed.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        """`.images` and the `return_dict=False` tuple must agree pixel-for-pixel."""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(
            generator=generator, num_inference_steps=20, output_type='numpy', return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Slow integration test for PNDMPipeline with real pretrained CIFAR-10 weights."""

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        # Reference values recorded from a known-good run of this checkpoint.
        expected_slice = np.array(
            [0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 710 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A : int = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( a_ , unittest.TestCase ):
    # Test suite for GPTSwaTokenizer built on the shared TokenizerTesterMixin.
    # NOTE(review): these four class attributes all share one mangled name, so only the
    # last assignment survives; they presumably were tokenizer_class / test_rust_tokenizer /
    # test_sentencepiece / test_sentencepiece_ignore_case — confirm against the original file.
    __UpperCAmelCase = GPTSwaTokenizer
    __UpperCAmelCase = False
    __UpperCAmelCase = True
    __UpperCAmelCase = False

    def __snake_case ( self : List[str] ):
        '''Build the fixture tokenizer and save it to tmpdirname for the mixin to reload.'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): `_snake_case` and `tokenizer` are unbound here (obfuscated names);
        # the intent is tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, ...) — confirm.
        snake_case : int =GPTSwaTokenizer(_snake_case, eos_token='''<unk>''', bos_token='''<unk>''', pad_token='''<unk>''' )

        tokenizer.save_pretrained(self.tmpdirname )

    def __snake_case ( self : Optional[Any], _snake_case : Tuple ):
        '''Return an (input_text, output_text) pair for round-trip checks.'''
        snake_case : int ='''This is a test'''
        snake_case : Union[str, Any] ='''This is a test'''
        return input_text, output_text

    def __snake_case ( self : Tuple ):
        '''The fixture vocab must map token "<s>" <-> id 1 in both directions.'''
        snake_case : Dict ='''<s>'''
        snake_case : Optional[Any] =1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ), _snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ), _snake_case )

    def __snake_case ( self : int ):
        '''Spot-check the first/last vocab entries and the vocab size (2000).'''
        snake_case : Optional[int] =list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], '''<unk>''' )
        self.assertEqual(vocab_keys[1], '''<s>''' )
        self.assertEqual(vocab_keys[-1], '''j''' )
        self.assertEqual(len(_snake_case ), 2_000 )

    def __snake_case ( self : Optional[int] ):
        '''vocab_size property must agree with the fixture model.'''
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000 )

    def __snake_case ( self : Tuple ):
        '''Full tokenize -> ids -> tokens round trip, incl. byte-fallback pieces (<0x..>).'''
        snake_case : List[Any] =GPTSwaTokenizer(_snake_case )

        snake_case : Any =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(_snake_case, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ), [465, 287, 265, 631, 842] )

        snake_case : List[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        # fmt: off
        self.assertListEqual(
            _snake_case, ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''], )
        # fmt: on

        snake_case : List[Any] =tokenizer.convert_tokens_to_ids(_snake_case )
        self.assertListEqual(
            _snake_case, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )

        snake_case : Optional[Any] =tokenizer.convert_ids_to_tokens(_snake_case )
        # fmt: off
        self.assertListEqual(
            _snake_case, ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
        # fmt: on

    def __snake_case ( self : Optional[int] ):
        '''encode_fast/decode_fast must match tokenize+convert_tokens_to_ids and invert it.'''
        snake_case : int =GPTSwaTokenizer(_snake_case )
        snake_case : Dict =['''This is a test''', '''I was born in 92000, and this is falsé.''']
        snake_case : Union[str, Any] =[
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(_snake_case, _snake_case ):
            self.assertListEqual(tokenizer.encode_fast(_snake_case ), _snake_case )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(_snake_case, _snake_case ):
            self.assertEqual(tokenizer.decode_fast(_snake_case ), _snake_case )

    @slow
    def __snake_case ( self : Union[str, Any] ):
        '''Integration test against the published AI-Sweden/gpt-sw3-126m checkpoint.'''
        snake_case : Optional[int] =[
            '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
            '''Hey there, how are you doing this fine day?''',
            '''This is a text with a trailing spaces followed by a dot .''',
            '''Häj sväjs lillebrör! =)''',
            '''Det är inget fel på Mr. Cool''',
        ]

        # Golden encodings recorded from the real checkpoint; do not edit by hand.
        # fmt: off
        snake_case : int ={'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case, model_name='''AI-Sweden/gpt-sw3-126m''', sequences=_snake_case, )
| 136 | 0 |
from __future__ import annotations

# A path is a list of (y, x) grid coordinates.
# NOTE: the original (obfuscated) source bound the alias, the grid and the move
# deltas — and both classes below — to the single name `a`, so every later
# reference (Path, grid, delta, Node, GreedyBestFirst) was undefined.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """A search node: a grid position plus path cost and heuristic value."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Ordering by f_cost lets the open list be sorted cheapest-first.
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """Greedy best-first search over the module-level `grid`.

    `start` and `goal` are (y, x) positions.
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        """Expand the heuristically-best open node until the goal is reached."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (lowest heuristic first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbors of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the start and return the path start->node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 63 |
from manim import *
class __a ( SCREAMING_SNAKE_CASE ):
    """Manim scene animating how an empty model skeleton is loaded into memory.

    Draws CPU/GPU/Model rectangle groups, a legend, and then animates each model
    block flying into the CPU region.

    NOTE(review): `snake_case_` is referenced as a *value* throughout (e.g.
    `.arrange(snake_case_, ...)`, `aligned_edge=snake_case_`) but is never bound
    in this scope — these were presumably manim direction constants (DOWN, RIGHT,
    LEFT, ...) before obfuscation; confirm against the original file.
    """

    def UpperCamelCase ( self : Tuple)-> Dict:
        # Template rectangles: `mem` is the visible cell, the second one is a
        # slightly smaller stroke-less variant used later for the fill targets.
        __lowerCAmelCase =Rectangle(height=0.5 , width=0.5)
        __lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)

        # CPU: two columns of six memory cells with a caption.
        __lowerCAmelCase =[mem.copy() for i in range(6)]
        __lowerCAmelCase =[mem.copy() for i in range(6)]
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =VGroup(snake_case_ , snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =Text("""CPU""" , font_size=24)
        __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(snake_case_)

        # GPU: a single memory cell with a caption, aligned next to the CPU.
        __lowerCAmelCase =[mem.copy() for i in range(1)]
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =Text("""GPU""" , font_size=24)
        __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
        gpu.align_to(snake_case_ , snake_case_)
        gpu.set_x(gpu.get_x() - 1)
        self.add(snake_case_)

        # Model: a row of six cells representing the checkpoint blocks.
        __lowerCAmelCase =[mem.copy() for i in range(6)]
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =Text("""Model""" , font_size=24)
        __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , )

        # Caption and legend (key square + text) explaining the color coding.
        __lowerCAmelCase =MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
        __lowerCAmelCase =Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        __lowerCAmelCase =MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0])

        step_a.move_to([2, 2, 0])
        self.play(Write(snake_case_ , run_time=2.5) , Write(snake_case_) , Write(snake_case_))

        self.add(snake_case_)

        # For each model block: create a small fill rectangle, position its
        # animation target inside the CPU columns, then play stroke + move
        # animations in two batches.
        __lowerCAmelCase =[]
        __lowerCAmelCase =[]
        __lowerCAmelCase =[]
        for i, rect in enumerate(snake_case_):
            __lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(snake_case_ , opacity=0.7)
            cpu_target.move_to(snake_case_)
            cpu_target.generate_target()
            # Cell sub-spacing: a quarter/third of the 0.46 cell size.
            __lowerCAmelCase =0.4_6 / 4
            __lowerCAmelCase =0.4_6 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=snake_case_)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                # Fourth block starts the second column, anchored to the first target.
                cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0)
            cpu_targs.append(snake_case_)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(snake_case_))
            second_animations.append(MoveToTarget(snake_case_ , run_time=1.5))

        self.play(*snake_case_)
        self.play(*snake_case_)
        self.wait()
| 354 | 0 |
"""simple docstring"""
import numpy as np
A_ = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class lowercase:
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = np.array(a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
_snake_case , _snake_case : Union[str, Any] = np.where(letter == self.SQUARE )
_snake_case : Dict = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def UpperCamelCase_ ( self: Union[str, Any], a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def UpperCamelCase_ ( self: Any, a_: str ):
'''simple docstring'''
_snake_case : int = message.lower()
_snake_case : str = message.replace(""" """, """""" )
_snake_case : Optional[int] = message.replace("""j""", """i""" )
_snake_case : List[str] = np.empty((2, len(a_ )) )
for letter_index in range(len(a_ ) ):
_snake_case : List[Any] = self.letter_to_numbers(message[letter_index] )
_snake_case : Union[str, Any] = numbers[0]
_snake_case : Optional[int] = numbers[1]
_snake_case : Tuple = first_step.reshape(2 * len(a_ ) )
_snake_case : Optional[Any] = """"""
for numbers_index in range(len(a_ ) ):
_snake_case : int = int(second_step[numbers_index * 2] )
_snake_case : Tuple = int(second_step[(numbers_index * 2) + 1] )
_snake_case : Optional[int] = self.numbers_to_letter(a_, a_ )
_snake_case : int = encoded_message + letter
return encoded_message
def UpperCamelCase_ ( self: Any, a_: str ):
'''simple docstring'''
_snake_case : List[Any] = message.lower()
message.replace(""" """, """""" )
_snake_case : Any = np.empty(2 * len(a_ ) )
for letter_index in range(len(a_ ) ):
_snake_case : List[str] = self.letter_to_numbers(message[letter_index] )
_snake_case : str = numbers[0]
_snake_case : Dict = numbers[1]
_snake_case : int = first_step.reshape((2, len(a_ )) )
_snake_case : Optional[Any] = """"""
for numbers_index in range(len(a_ ) ):
_snake_case : Union[str, Any] = int(second_step[0, numbers_index] )
_snake_case : Tuple = int(second_step[1, numbers_index] )
_snake_case : List[str] = self.numbers_to_letter(a_, a_ )
_snake_case : List[str] = decoded_message + letter
return decoded_message
| 28 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
    """ConvNextV2 model/pipeline test suite.

    NOTE(review): obfuscation artifacts throughout — the mixin bases ``__a``
    are undefined (upstream: ModelTesterMixin, PipelineTesterMixin), every
    test method is named ``UpperCamelCase_`` (later defs shadow earlier ones,
    so unittest would only ever collect the last), all five class flags rebind
    the same name ``lowercase__``, and bodies read ``a_``/``model``/``loss``
    locals that are never bound (assigned to ``_snake_case`` instead).
    """

    # Intended upstream attribute: all_model_classes.
    lowercase__ = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Intended upstream attribute: pipeline_model_mapping.
    lowercase__ = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Intended upstream flags (fx_compatible, test_pruning, test_resize_embeddings,
    # test_head_masking, has_attentions) — all collapsed onto one name here.
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    def UpperCamelCase_ ( self: Tuple ):
        """setUp: build the shared model tester and config tester.

        NOTE(review): ``ConvNextVaModelTester`` is not defined in this file
        (the tester class above is named ``lowercase``); ``a_`` is unbound.
        """
        _snake_case : Tuple = ConvNextVaModelTester(self )
        _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )

    def UpperCamelCase_ ( self: List[str] ):
        """Run the ConfigTester's common config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def UpperCamelCase_ ( self: Tuple ):
        """Placeholder for create_and_test_config_common_properties (no-op)."""
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def UpperCamelCase_ ( self: int ):
        """Skipped: vision model has no input embeddings."""
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def UpperCamelCase_ ( self: Any ):
        """Skipped: no tied input/output embeddings to test."""
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def UpperCamelCase_ ( self: str ):
        """Skipped: feed-forward chunking not implemented for this model."""
        pass

    def UpperCamelCase_ ( self: int ):
        """Train each model class for one step and backprop the loss."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            _snake_case : List[Any] = True
            # Skip mapping-only classes (backbone/base mappings) with no loss.
            if model_class.__name__ in [
                *get_values(a_ ),
                *get_values(a_ ),
            ]:
                continue
            _snake_case : Tuple = model_class(a_ )
            model.to(a_ )
            model.train()
            _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
            _snake_case : Any = model(**a_ ).loss
            loss.backward()

    def UpperCamelCase_ ( self: Optional[int] ):
        """Same as the training test but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
            _snake_case : Any = False
            _snake_case : List[Any] = True
            if (
                model_class.__name__
                in [*get_values(a_ ), *get_values(a_ )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            _snake_case : Dict = model_class(a_ )
            model.to(a_ )
            model.gradient_checkpointing_enable()
            model.train()
            _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
            _snake_case : Optional[int] = model(**a_ ).loss
            loss.backward()

    def UpperCamelCase_ ( self: str ):
        """Check the forward signature starts with ``pixel_values``."""
        _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case : List[str] = model_class(a_ )
            _snake_case : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case : int = [*signature.parameters.keys()]
            _snake_case : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], a_ )

    def UpperCamelCase_ ( self: int ):
        """Run the bare-model shape check via the model tester."""
        _snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    def UpperCamelCase_ ( self: Union[str, Any] ):
        """Check number and spatial shape of the hidden-state outputs."""
        def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
            _snake_case : Optional[Any] = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
            _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _snake_case : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(a_ ), expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case : Optional[Any] = True
            check_hidden_states_output(a_, a_, a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _snake_case : List[str] = True
            check_hidden_states_output(a_, a_, a_ )

    def UpperCamelCase_ ( self: Dict ):
        """Run the image-classification check via the model tester."""
        _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )

    @slow
    def UpperCamelCase_ ( self: Dict ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : str = ConvNextVaModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
    """Load the COCO cats fixture image used by the slow integration test.

    Fix: the original bound the opened image to a throwaway ``_snake_case``
    local and then returned the undefined name ``image`` (NameError).
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
    """Slow integration test: run the pretrained ConvNextV2-tiny classifier on
    a fixture image and compare the logits against reference values.

    NOTE(review): obfuscation artifacts — both methods are named
    ``UpperCamelCase_`` (the property shadows nothing but ``self.default_image_processor``
    below refers to an attribute that no longer exists under that name), and
    bodies read locals (``preprocessor``, ``inputs``, ``outputs``) that are
    bound to ``_snake_case`` instead; ``a_`` is unbound (upstream: torch_device).
    """

    @cached_property
    def UpperCamelCase_ ( self: Optional[Any] ):
        """Image processor for the checkpoint, or None without vision deps."""
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self: int ):
        """Forward the fixture image; check logits shape and first 3 values."""
        _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
        _snake_case : Union[str, Any] = self.default_image_processor
        _snake_case : List[Any] = prepare_img()
        _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
        # forward pass
        with torch.no_grad():
            _snake_case : Optional[int] = model(**a_ )
        # verify the logits
        _snake_case : Optional[int] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, a_ )
        _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
| 28 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ (lowerCAmelCase__ ):
    """Test suite for diffusers' CMStochasticIterativeScheduler.

    NOTE(review): obfuscation artifacts — the base class ``lowerCAmelCase__``
    is undefined (upstream: ``SchedulerCommonTest``), every method is named
    ``A__`` so later definitions shadow earlier ones, and bodies bind locals
    to ``lowerCAmelCase__`` while reading the intended names
    (``config``/``scheduler``/``sample``/...), which are unbound. Several
    methods take ``**__lowerCamelCase`` but expand ``**__lowerCAmelCase``
    (note the CAmel/Camel mismatch).
    """

    # Scheduler class(es) under test and default number of inference steps.
    lowercase_ : Union[str, Any] = (CMStochasticIterativeScheduler,)
    lowercase_ : Optional[Any] = 10

    def A__ ( self : Optional[Any] , **__lowerCamelCase : Optional[int] ):
        """Return the default scheduler config, updated with any overrides."""
        lowerCAmelCase__ = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**__lowerCAmelCase )
        return config

    def A__ ( self : str ):
        """Two consecutive steps must preserve the sample's shape."""
        lowerCAmelCase__ = 10
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = self.scheduler_classes[0](**__lowerCAmelCase )
        scheduler.set_timesteps(__lowerCAmelCase )
        lowerCAmelCase__ = scheduler.timesteps[0]
        lowerCAmelCase__ = scheduler.timesteps[1]
        lowerCAmelCase__ = self.dummy_sample
        lowerCAmelCase__ = 0.1 * sample
        lowerCAmelCase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        lowerCAmelCase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def A__ ( self : List[Any] ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=__lowerCAmelCase )

    def A__ ( self : Dict ):
        """Config sweep over clip_denoised."""
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=__lowerCAmelCase )

    def A__ ( self : int ):
        """Full denoising loop with default timesteps; check result stats."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCAmelCase )
        lowerCAmelCase__ = 1
        scheduler.set_timesteps(__lowerCAmelCase )
        lowerCAmelCase__ = scheduler.timesteps
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = self.dummy_model()
        lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(__lowerCAmelCase ):
            # 1. scale model input
            lowerCAmelCase__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            # 2. predict noise residual
            lowerCAmelCase__ = model(__lowerCAmelCase , __lowerCAmelCase )
            # 3. predict previous sample x_t-1
            lowerCAmelCase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
            lowerCAmelCase__ = pred_prev_sample
        lowerCAmelCase__ = torch.sum(torch.abs(__lowerCAmelCase ) )
        lowerCAmelCase__ = torch.mean(torch.abs(__lowerCAmelCase ) )
        # Reference statistics for the deterministic seeded run.
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3

    def A__ ( self : Tuple ):
        """Full denoising loop with custom timesteps; check result stats."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCAmelCase )
        lowerCAmelCase__ = [1_06, 0]
        scheduler.set_timesteps(timesteps=__lowerCAmelCase )
        lowerCAmelCase__ = scheduler.timesteps
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = self.dummy_model()
        lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            lowerCAmelCase__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            # 2. predict noise residual
            lowerCAmelCase__ = model(__lowerCAmelCase , __lowerCAmelCase )
            # 3. predict previous sample x_t-1
            lowerCAmelCase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
            lowerCAmelCase__ = pred_prev_sample
        lowerCAmelCase__ = torch.sum(torch.abs(__lowerCAmelCase ) )
        lowerCAmelCase__ = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3

    def A__ ( self : Union[str, Any] ):
        """Non-descending custom timesteps must raise."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCAmelCase )
        lowerCAmelCase__ = [39, 30, 12, 15, 0]
        with self.assertRaises(__lowerCAmelCase , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__lowerCAmelCase )

    def A__ ( self : Any ):
        """Passing both num_inference_steps and timesteps must raise."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCAmelCase )
        lowerCAmelCase__ = [39, 30, 12, 1, 0]
        lowerCAmelCase__ = len(__lowerCAmelCase )
        with self.assertRaises(__lowerCAmelCase , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase )

    def A__ ( self : Optional[Any] ):
        """Timesteps at/above num_train_timesteps must raise.

        NOTE(review): the msg below is not an f-string and carries a stray
        closing brace, so the placeholder is never interpolated — upstream
        intends an f-string here.
        """
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCAmelCase )
        lowerCAmelCase__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            __lowerCAmelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=__lowerCAmelCase )
| 615 | '''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class a__( lowerCAmelCase__ ):
    """Output container for the Shap-E img2img pipeline (upstream:
    ``ShapEPipelineOutput``).

    NOTE(review): the base class name ``lowerCAmelCase__`` is undefined here
    (obfuscation artifact); upstream derives from ``BaseOutput``.
    """

    # Rendered views of the generated 3D asset(s), as PIL images or arrays.
    UpperCAmelCase_ : Union[PIL.Image.Image, np.ndarray]
class a__( lowerCAmelCase__ ):
    """Shap-E image-to-3D diffusion pipeline (upstream: ``ShapEImg2ImgPipeline``).

    NOTE(review): obfuscation artifacts — the base ``lowerCAmelCase__`` is
    undefined (upstream: ``DiffusionPipeline``), all helper methods are named
    ``a_`` so later defs shadow earlier ones, several signatures repeat the
    parameter name ``__lowerCAmelCase`` (duplicate arguments are a
    SyntaxError), and bodies bind locals to ``lowerCAmelCase`` while reading
    the intended names. Comments record the evident intent.
    """

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
        """Register prior, image encoder, image processor, scheduler, renderer."""
        super().__init__()
        self.register_modules(
            prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , )

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
        """prepare_latents: sample (or validate) latents, scaled by init sigma."""
        if latents is None:
            lowerCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            lowerCAmelCase = latents.to(__lowerCAmelCase)
        # Scale by the scheduler's initial noise sigma before the first step.
        lowerCAmelCase = latents * scheduler.init_noise_sigma
        return latents

    def a_ ( self , __lowerCAmelCase=0):
        """enable_sequential_cpu_offload: move submodels to CPU via accelerate."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")
        lowerCAmelCase = torch.device(f"cuda:{gpu_id}")
        lowerCAmelCase = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__lowerCAmelCase , __lowerCAmelCase)

    @property
    def a_ ( self):
        """_execution_device: device of the hooked module, or self.device."""
        if self.device != torch.device("""meta""") or not hasattr(self.image_encoder , """_hf_hook"""):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(__lowerCAmelCase , """_hf_hook""")
                and hasattr(module._hf_hook , """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
        """_encode_image: CLIP-encode the image(s) into prior conditioning."""
        if isinstance(__lowerCAmelCase , __lowerCAmelCase) and isinstance(image[0] , torch.Tensor):
            lowerCAmelCase = torch.cat(__lowerCAmelCase , axis=0) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0)
        if not isinstance(__lowerCAmelCase , torch.Tensor):
            lowerCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors="""pt""").pixel_values[0].unsqueeze(0)
        lowerCAmelCase = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase)
        lowerCAmelCase = self.image_encoder(__lowerCAmelCase)["""last_hidden_state"""]
        lowerCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        lowerCAmelCase = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0)
        if do_classifier_free_guidance:
            lowerCAmelCase = torch.zeros_like(__lowerCAmelCase)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(__lowerCAmelCase)
    def __call__( self , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 25 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 4.0 , __lowerCAmelCase = 64 , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ):
        """Run the prior diffusion loop and render the resulting latents.

        NOTE(review): besides the obfuscation issues flagged on the class, two
        behavioral smells below — the CFG branch tests ``is not None`` on a
        bool (always true, so the no-guidance path incorrectly chunks the
        batch), and there is a stray debug ``print()`` in the render loop.
        """
        # Derive batch size from the input image type.
        if isinstance(__lowerCAmelCase , PIL.Image.Image):
            lowerCAmelCase = 1
        elif isinstance(__lowerCAmelCase , torch.Tensor):
            lowerCAmelCase = image.shape[0]
        elif isinstance(__lowerCAmelCase , __lowerCAmelCase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
            lowerCAmelCase = len(__lowerCAmelCase)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase)}")
        lowerCAmelCase = self._execution_device
        lowerCAmelCase = batch_size * num_images_per_prompt
        # Classifier-free guidance is active when guidance_scale > 1.
        lowerCAmelCase = guidance_scale > 1.0
        lowerCAmelCase = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
        # prior
        self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase)
        lowerCAmelCase = self.scheduler.timesteps
        lowerCAmelCase = self.prior.config.num_embeddings
        lowerCAmelCase = self.prior.config.embedding_dim
        lowerCAmelCase = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        lowerCAmelCase = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase)
        for i, t in enumerate(self.progress_bar(__lowerCAmelCase)):
            # expand the latents if we are doing classifier free guidance
            lowerCAmelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            lowerCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase)
            lowerCAmelCase = self.prior(
                __lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding
            # remove the variance
            lowerCAmelCase , lowerCAmelCase = noise_pred.split(
                scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            # NOTE(review): bug — do_classifier_free_guidance is a bool, so
            # ``is not None`` is always true; should be a truthiness test.
            if do_classifier_free_guidance is not None:
                lowerCAmelCase , lowerCAmelCase = noise_pred.chunk(2)
                lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            lowerCAmelCase = self.scheduler.step(
                __lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=__lowerCAmelCase)
        lowerCAmelCase = []
        for i, latent in enumerate(__lowerCAmelCase):
            # NOTE(review): stray debug print — should be removed upstream.
            print()
            lowerCAmelCase = self.renderer.decode(
                latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(__lowerCAmelCase)
        lowerCAmelCase = torch.stack(__lowerCAmelCase)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        lowerCAmelCase = images.cpu().numpy()
        if output_type == "pil":
            lowerCAmelCase = [self.numpy_to_pil(__lowerCAmelCase) for image in images]
        # Offload last model to CPU
        if hasattr(self , """final_offload_hook""") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=__lowerCAmelCase)
| 370 | 0 |
"""simple docstring"""
import re
def _UpperCAmelCase ( lowerCamelCase__ ) -> list:
    """Split *lowerCamelCase__* at every non-alphanumeric, non-space character,
    then whitespace-split each segment into words.

    Returns a list of word lists, one per punctuation-delimited segment.

    Fix: the body referenced the undefined name ``str_`` instead of the
    function's parameter (NameError at call time).
    """
    return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , lowerCamelCase__ )]
def _UpperCAmelCase ( lowerCamelCase__ ) -> str:
    """Convert *lowerCamelCase__* to PascalCase: split on punctuation and
    whitespace, capitalize every word, join with no separator.

    Fixes: the original called the undefined helper ``split_input`` on the
    undefined name ``str_`` and read the never-bound local ``string_split``;
    the splitting is inlined here so the function is self-contained.
    """
    string_split = [chunk.split() for chunk in re.split(r"""[^ a-z A-Z 0-9 \s]""" , lowerCamelCase__ )]
    return "".join(
        "".join(word.capitalize() for word in sub_str ) for sub_str in string_split )
def _UpperCAmelCase ( str_ , upper , separator ) -> str:
    """Rejoin *str_* with *separator* between words, forcing every word to
    upper- or lower-case according to the *upper* flag.

    Punctuation-delimited segments are joined with no separator between them
    (matching the original nested-join structure). Returns the literal
    "not valid string" when the input cannot be processed — the original's
    best-effort ``IndexError`` guard is preserved.

    Fixes: all three parameters were declared with the same name (a
    SyntaxError), the helper ``split_input`` is undefined in this file
    (inlined below), and the body read the never-bound local ``string_split``.
    """
    try:
        string_split = [chunk.split() for chunk in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
        if upper:
            res_str = "".join(
                separator.join(char.upper() for char in sub_str )
                for sub_str in string_split )
        else:
            res_str = "".join(
                separator.join(char.lower() for char in sub_str )
                for sub_str in string_split )
        return res_str
    except IndexError:
        return "not valid string"
def _UpperCAmelCase ( lowerCamelCase__ ) -> str:
    """Return *lowerCamelCase__* in PascalCase (alias of the simple-case
    conversion).

    Fix: the original returned ``to_simple_case(lowerCamelCase__)``, but
    ``to_simple_case`` is never defined in this file (every helper was
    renamed ``_UpperCAmelCase``), so the conversion is inlined here.
    """
    string_split = [chunk.split() for chunk in re.split(r"""[^ a-z A-Z 0-9 \s]""" , lowerCamelCase__ )]
    return "".join(
        "".join(word.capitalize() for word in sub_str ) for sub_str in string_split )
def _UpperCAmelCase ( lowerCamelCase__ ) -> str:
    """Return *lowerCamelCase__* in camelCase (PascalCase with the first
    letter lower-cased); returns "not valid string" for empty input.

    Fixes: ``to_simple_case`` is undefined in this file (its logic is inlined
    below) and the result was read back through the never-bound local
    ``res_str``.
    """
    try:
        string_split = [chunk.split() for chunk in re.split(r"""[^ a-z A-Z 0-9 \s]""" , lowerCamelCase__ )]
        res_str = "".join(
            "".join(word.capitalize() for word in sub_str ) for sub_str in string_split )
        # Empty input leaves res_str empty, so indexing raises IndexError.
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def _UpperCAmelCase ( str_ , upper ) -> str:
    """Return *str_* in snake_case; *upper* selects SCREAMING_SNAKE_CASE.

    Fixes: both parameters shared one name (a SyntaxError) and the helper
    ``to_complex_case`` is never defined in this file, so its logic is
    inlined here with the ``"_"`` separator.
    """
    try:
        string_split = [chunk.split() for chunk in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
        case = str.upper if upper else str.lower
        return "".join(
            "_".join(case(word ) for word in sub_str ) for sub_str in string_split )
    except IndexError:
        return "not valid string"
def _UpperCAmelCase ( str_ , upper ) -> str:
    """Return *str_* in kebab-case; *upper* selects upper-cased words.

    Fixes: both parameters shared one name (a SyntaxError) and the helper
    ``to_complex_case`` is never defined in this file, so its logic is
    inlined here with the ``"-"`` separator.
    """
    try:
        string_split = [chunk.split() for chunk in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
        case = str.upper if upper else str.lower
        return "".join(
            "-".join(case(word ) for word in sub_str ) for sub_str in string_split )
    except IndexError:
        return "not valid string"
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    __import__("doctest").testmod()
| 719 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
    """Synthetic 1-D linear-regression dataset: ``y = a*x + b`` plus
    Gaussian noise with scale 0.1 (upstream: accelerate's RegressionDataset).

    Fixes vs. the obfuscated original: all four ``__init__`` parameters
    shared the name ``snake_case__`` (a SyntaxError), every attribute was
    bound to a throwaway local instead of ``self``, and ``np.floataa`` does
    not exist (``np.float32`` intended).
    """

    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        """Generate *length* (x, y) pairs from the line ``y = a*x + b``."""
        # Seeded generator so the dataset is reproducible per seed.
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )

    def __len__( self ):
        """Number of samples."""
        return self.length

    def __getitem__( self , i ):
        """Return the i-th sample as a feature/target dict."""
        return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ):
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
    """Toy linear model ``y = a*x + b`` with learnable tensors built from the
    constructor arguments (upstream: accelerate's RegressionModel).

    Fixes vs. the obfuscated original: all three ``__init__`` parameters
    shared the name ``snake_case__`` (a SyntaxError), attributes were bound
    to a throwaway local instead of ``self``, and the forward read the
    undefined name ``x``.

    NOTE(review): upstream names the method below ``forward``; the obfuscated
    name is kept to avoid changing the visible interface.
    """

    def __init__( self , a=0 , b=0 , double_output=False ):
        """Wrap *a* and *b* in trainable float parameters."""
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True

    def _SCREAMING_SNAKE_CASE ( self , x=None ):
        """Apply ``x * a + b``, logging dtypes on the first batch only."""
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
def _UpperCAmelCase ( accelerator , batch_size = 16 ):
    """Build tokenized MRPC train/validation DataLoaders for accelerate tests
    (upstream: ``mocked_dataloaders``).

    Fixes vs. the obfuscated original: both parameters shared the name
    ``lowerCamelCase__`` (a SyntaxError), every local was bound to
    ``lowerCAmelCase__`` while the bodies read the intended names
    (``datasets``/``tokenizer``/...), and boolean literals in the tokenizer
    and ``map`` calls had been replaced by the parameter name.

    NOTE(review): ``batch_size`` is accepted for signature parity but the
    loaders below use hard-coded batch sizes (2 train / 1 eval), matching
    the original body.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""" , data_files=data_files )
    label_list = datasets["""train"""].unique("""label""" )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
| 674 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def SCREAMING_SNAKE_CASE ( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> None:
    """Compute per-example token lengths for the train and val splits and
    pickle them to each dataset's ``len_file`` (upstream: ``save_len_file``).

    Fixes vs. the obfuscated original: every parameter (and the kwargs
    catch-all) shared the name ``__UpperCamelCase`` (a SyntaxError), and the
    body read locals (``tok``/``train_ds``/``dl``/``max_lens``/...) that were
    bound to throwaway names instead.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''train''' , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        """Return the per-example max length (source, or max(src, tgt))."""
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            # Count non-pad tokens per example on both sides.
            src_lens = batch['''input_ids'''].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['''labels'''].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''val''' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )


if __name__ == "__main__":
    # Fix: the guard previously referenced the undefined name ``save_len_file``.
    fire.Fire(SCREAMING_SNAKE_CASE)
| 144 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] ) -> list[int]:
    """Sort *__UpperCamelCase* in place using exchange sort and return it.

    Fixes: the original read the undefined name ``numbers`` (the parameter is
    ``__UpperCamelCase``), discarded ``len()`` into a throwaway local, and
    its "swap" assigned both elements to throwaway names, never mutating the
    list.

    >>> SCREAMING_SNAKE_CASE([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    count = len(__UpperCamelCase )
    for i in range(count ):
        for j in range(i + 1 , count ):
            # Swap whenever a later element is smaller than the slot at i.
            if __UpperCamelCase[j] < __UpperCamelCase[i]:
                __UpperCamelCase[i], __UpperCamelCase[j] = __UpperCamelCase[j], __UpperCamelCase[i]
    return __UpperCamelCase


if __name__ == "__main__":
    # Simple CLI: read comma-separated integers and print them sorted.
    # Fix: the guard previously read the undefined names ``user_input``,
    # ``unsorted`` and ``exchange_sort``.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(SCREAMING_SNAKE_CASE(unsorted))
| 144 | 1 |
from collections.abc import Callable
import numpy as np
def _a ( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ):
    """Integrate ``y' = ode_func(x, y)`` from *x0* to *x_end* with the
    modified-Euler (Heun) predictor-corrector method.

    Returns the numpy array of solution values ``y`` with ``y[0] == y0``.

    Fixes vs. the obfuscated original: all five parameters shared the name
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError), the body read never-bound names
    (``x_end``/``xa``/``ya``/``x``), and the corrector result was assigned to
    a throwaway local instead of ``y[k + 1]``.
    """
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # Predictor: a plain forward-Euler step.
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 703 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module-level constants for the SQuAD dataset helpers below. All three were
# obfuscated onto the same name; the final binding is the tuple of model types.
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
# Fix: the generator previously iterated ``MODEL_CONFIG_CLASSES``, a name that
# is never bound in this file; the list of config classes is the previous
# binding of ``__UpperCamelCase`` (evaluated eagerly before this rebinding).
__UpperCamelCase : Dict = tuple(conf.model_type for conf in __UpperCamelCase)
@dataclass
class __magic_name__ :
    """Training/evaluation arguments for SQuAD question answering
    (upstream: ``SquadDataTrainingArguments``).

    NOTE(review): obfuscation artifacts — every field below is named ``A``
    (in a dataclass each rebinding replaces the previous field, so only the
    last one survives) and several ``field(...)`` defaults reference the
    undefined name ``__lowerCAmelCase`` (upstream: ``None``/``False``).
    The comments record the intended field names.
    """

    # Intended field: model_type (one of the supported QA model types).
    A: str = field(
        default=__lowerCAmelCase , metadata={"help": "Model type selected in the list: " + ", ".join(__lowerCAmelCase)})
    # Intended field: data_dir.
    A: str = field(
        default=__lowerCAmelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    # Intended field: max_seq_length.
    A: int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    # Intended field: doc_stride.
    A: int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    # Intended field: max_query_length.
    A: int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    # Intended field: max_answer_length.
    A: int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    # Intended field: overwrite_cache.
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
    # Intended field: version_2_with_negative.
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    # Intended field: null_score_diff_threshold.
    A: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    # Intended field: n_best_size.
    # NOTE(review): this help text appears copy-pasted from the threshold field.
    A: int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    # Intended field: lang_id.
    A: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    # Intended field: threads.
    A: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class __magic_name__ ( __lowerCAmelCase):
    """Dataset split identifiers used by the SQuAD dataset class below.

    NOTE(review): both attributes are bound to the same obfuscated name `A`,
    so only the "dev" value survives; before obfuscation these were
    presumably distinct members (`train = "train"`, `dev = "dev"`), and the
    base `__lowerCAmelCase` is never defined in this file.  TODO confirm.
    """
    A: Union[str, Any] = "train"
    A: List[str] = "dev"
class __magic_name__ ( __lowerCAmelCase):
    """PyTorch ``Dataset`` that builds (or loads from an on-disk cache) SQuAD
    features and serves them as tensors.

    NOTE(review): obfuscation collapsed most local names onto
    ``UpperCamelCase__`` / ``lowerCamelCase__``, while the bodies read names
    that are never bound here (``args``, ``mode``, ``cached_features_file``,
    ``start``, ``feature`` ...).  The comments below describe the evident
    intent; the code itself is left byte-identical.
    """

    # Annotated class attributes; all four share the obfuscated name `A`
    # (originally presumably args / features / mode / is_language_sensitive).
    A: SquadDataTrainingArguments
    A: List[SquadFeatures]
    A: Split
    A: bool

    def __init__( self : Tuple , lowerCamelCase__ : SquadDataTrainingArguments , lowerCamelCase__ : PreTrainedTokenizer , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Union[str, Split] = Split.train , lowerCamelCase__ : Optional[bool] = False , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = "pt" , ) -> int:
        """Load SQuAD features from the cache file if present, otherwise read
        the raw examples, convert them to features and write the cache.

        NOTE(review): all seven parameters share one name — a SyntaxError as
        written; originally (args, tokenizer, limit_length, mode,
        is_language_sensitive, cache_dir, dataset_format) or similar.
        """
        UpperCamelCase__ : Dict = args
        UpperCamelCase__ : Union[str, Any] = is_language_sensitive
        UpperCamelCase__ : List[Any] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            try:
                UpperCamelCase__ : Union[str, Any] = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        UpperCamelCase__ : List[str] = mode
        # Load data features from cache or dataset file
        UpperCamelCase__ : List[str] = '''v2''' if args.version_2_with_negative else '''v1'''
        UpperCamelCase__ : str = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCamelCase__ : Tuple = cached_features_file + '''.lock'''
        with FileLock(lowerCamelCase__ ):
            if os.path.exists(lowerCamelCase__ ) and not args.overwrite_cache:
                UpperCamelCase__ : int = time.time()
                UpperCamelCase__ : List[str] = torch.load(lowerCamelCase__ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                UpperCamelCase__ : Any = self.old_features['''features''']
                UpperCamelCase__ : Union[str, Any] = self.old_features.get('''dataset''' , lowerCamelCase__ )
                UpperCamelCase__ : List[str] = self.old_features.get('''examples''' , lowerCamelCase__ )
                logger.info(
                    F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        ''' future run''' )
            else:
                # Cache miss (or overwrite requested): read the raw examples
                # for the requested split and convert them to features.
                if mode == Split.dev:
                    UpperCamelCase__ : List[str] = self.processor.get_dev_examples(args.data_dir )
                else:
                    UpperCamelCase__ : Any = self.processor.get_train_examples(args.data_dir )
                UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=lowerCamelCase__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowerCamelCase__ , )
                UpperCamelCase__ : Tuple = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , lowerCamelCase__ , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__( self : Any ) -> List[str]:
        """Number of cached/converted features in this split."""
        return len(self.features )

    def __getitem__( self : Dict , lowerCamelCase__ : Any ) -> Dict[str, torch.Tensor]:
        """Convert feature ``i`` to the tensor dict expected by the model.

        NOTE(review): the body reads ``i``, ``feature``, ``input_ids`` etc.
        while assignments bind the obfuscated ``UpperCamelCase__`` — artifacts
        of obfuscation; ``torch.intaa`` is presumably ``torch.int64``.
        """
        # Convert to Tensors and build dataset
        UpperCamelCase__ : List[Any] = self.features[i]
        UpperCamelCase__ : Optional[Any] = torch.tensor(feature.input_ids , dtype=torch.long )
        UpperCamelCase__ : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long )
        UpperCamelCase__ : Optional[int] = torch.tensor(feature.token_type_ids , dtype=torch.long )
        UpperCamelCase__ : int = torch.tensor(feature.cls_index , dtype=torch.long )
        UpperCamelCase__ : List[Any] = torch.tensor(feature.p_mask , dtype=torch.float )
        UpperCamelCase__ : Any = torch.tensor(feature.is_impossible , dtype=torch.float )
        UpperCamelCase__ : Dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
        # Some model families do not use token_type_ids at all.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        # XLNet/XLM additionally consume cls_index and p_mask.
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'''is_impossible''': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        # Training additionally needs the gold start/end positions.
        if self.mode == Split.train:
            UpperCamelCase__ : Optional[Any] = torch.tensor(feature.start_position , dtype=torch.long )
            UpperCamelCase__ : Optional[int] = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
        return inputs
| 106 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( UpperCAmelCase__ ):
    """Output container for the semantic Stable Diffusion pipeline: the
    generated images (list of PIL images or a numpy array) and an optional
    per-image NSFW flag list.

    NOTE(review): both fields share the obfuscated name
    `_SCREAMING_SNAKE_CASE`, so only the second annotation survives as
    written; originally presumably `images` and `nsfw_content_detected` —
    TODO confirm against the original pipeline output class.
    """
    _SCREAMING_SNAKE_CASE : Union[List[PIL.Image.Image], np.ndarray]
    _SCREAMING_SNAKE_CASE : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 427 | '''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCamelCase_ ( path : str , n_shave_prefix_segments : int = 1 ) -> str:
    """Remove dot-separated segments from a checkpoint weight path.

    A non-negative ``n_shave_prefix_segments`` drops that many leading
    segments; a negative value drops that many trailing segments.

    Fix relative to the obfuscated original: both parameters shared the name
    ``snake_case_`` (a SyntaxError) while the body read the unbound names
    ``path`` and ``n_shave_prefix_segments`` — restored here.

    :param path: dot-separated name, e.g. ``"input_blocks.1.0.weight"``.
    :param n_shave_prefix_segments: how many segments to drop (default 1).
    :return: the shortened path.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split("." )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split("." )[:n_shave_prefix_segments] )
def UpperCamelCase_ ( old_list , n_shave_prefix_segments=0 ) -> list:
    """Build old->new name mappings for LDM resnet weights.

    Each entry maps an original LDM key (``in_layers.0`` etc.) to its
    diffusers-style name (``norm1`` etc.), optionally shaving prefix
    segments.

    Fixes relative to the obfuscated original: duplicate parameter names
    (SyntaxError), ``mapping``/``new_item`` never bound, and a call to a
    segment-shaving helper that this file never defines (inlined below).

    :param old_list: iterable of original weight-key strings.
    :param n_shave_prefix_segments: leading (or, if negative, trailing)
        dot-separated segments to drop from each new name.
    :return: list of ``{"old": ..., "new": ...}`` dicts.
    """
    def _shave(name: str) -> str:
        # Inlined prefix/suffix shaving (the original helper is undefined
        # in this file, which would raise NameError at runtime).
        segments = name.split(".")
        if n_shave_prefix_segments >= 0:
            return ".".join(segments[n_shave_prefix_segments:])
        return ".".join(segments[:n_shave_prefix_segments])

    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0" , "norm1" )
        new_item = new_item.replace("in_layers.2" , "conv1" )
        new_item = new_item.replace("out_layers.0" , "norm2" )
        new_item = new_item.replace("out_layers.3" , "conv2" )
        new_item = new_item.replace("emb_layers.1" , "time_emb_proj" )
        new_item = new_item.replace("skip_connection" , "conv_shortcut" )
        new_item = _shave(new_item)
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def UpperCamelCase_ ( old_list , n_shave_prefix_segments=0 ) -> list:
    """Build old->new name mappings for LDM attention weights.

    Maps ``norm.*`` to ``group_norm.*`` and ``proj_out.*`` to
    ``proj_attn.*``, optionally shaving prefix segments.

    Fixes relative to the obfuscated original: duplicate parameter names
    (SyntaxError), ``mapping``/``new_item`` never bound, and a call to a
    segment-shaving helper that this file never defines (inlined below).

    :param old_list: iterable of original weight-key strings.
    :param n_shave_prefix_segments: leading (or, if negative, trailing)
        dot-separated segments to drop from each new name.
    :return: list of ``{"old": ..., "new": ...}`` dicts.
    """
    def _shave(name: str) -> str:
        # Inlined prefix/suffix shaving (the original helper is undefined
        # in this file, which would raise NameError at runtime).
        segments = name.split(".")
        if n_shave_prefix_segments >= 0:
            return ".".join(segments[n_shave_prefix_segments:])
        return ".".join(segments[:n_shave_prefix_segments])

    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight" , "group_norm.weight" )
        new_item = new_item.replace("norm.bias" , "group_norm.bias" )
        new_item = new_item.replace("proj_out.weight" , "proj_attn.weight" )
        new_item = new_item.replace("proj_out.bias" , "proj_attn.bias" )
        new_item = _shave(new_item)
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def UpperCamelCase_ ( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """Copy weights from ``old_checkpoint`` into ``checkpoint`` following the
    old->new path mappings in ``paths``, optionally splitting fused qkv
    attention tensors into separate query/key/value entries.

    Fixes relative to the obfuscated original: all six parameters shared the
    name ``snake_case_`` (a SyntaxError) and the split/copy results were
    bound to a single throwaway name instead of being stored into the
    destination checkpoint — restored from the body's own references
    (``old_checkpoint``, ``path_map``, ``config`` ...).

    :param paths: list of ``{"old": ..., "new": ...}`` mapping dicts.
    :param checkpoint: destination state dict (mutated in place).
    :param old_checkpoint: source state dict.
    :param attention_paths_to_split: optional map of fused-qkv old keys to
        ``{"query": ..., "key": ..., "value": ...}`` destination keys.
    :param additional_replacements: optional list of extra
        ``{"old": ..., "new": ...}`` substring replacements.
    :param config: model config dict; ``num_head_channels`` is required when
        splitting attention tensors.
    """
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            # proj weights are conv1d (3-D); biases are 1-D, hence the two shapes.
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["query"]] = query.reshape(target_shape )
            checkpoint[path_map["key"]] = key.reshape(target_shape )
            checkpoint[path_map["value"]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
        new_path = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
        new_path = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"] , replacement["new"] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : List[str] ) -> Tuple:
    """Translate a CompVis LDM UNet state dict into the diffusers layout.

    Walks the input/middle/output blocks of the old checkpoint, renames each
    weight to its diffusers-style key and returns the new state dict.

    NOTE(review): as written this is not runnable — the two parameters share
    one name (a SyntaxError), every intermediate result is rebound to the
    single obfuscated name ``__lowerCAmelCase``, and the body reads names
    (``checkpoint``, ``config``, ``input_blocks``, ``new_checkpoint``,
    ``renew_resnet_paths``, ``assign_to_checkpoint``, ``shave_segments`` ...)
    that are never bound under those names in this file.  Comments describe
    the evident intent only; the code is left byte-identical.
    """
    __lowerCAmelCase = {}
    # Time-embedding MLP and the stem conv / output head map one-to-one.
    __lowerCAmelCase = checkpoint["""time_embed.0.weight"""]
    __lowerCAmelCase = checkpoint["""time_embed.0.bias"""]
    __lowerCAmelCase = checkpoint["""time_embed.2.weight"""]
    __lowerCAmelCase = checkpoint["""time_embed.2.bias"""]
    __lowerCAmelCase = checkpoint["""input_blocks.0.0.weight"""]
    __lowerCAmelCase = checkpoint["""input_blocks.0.0.bias"""]
    __lowerCAmelCase = checkpoint["""out.0.weight"""]
    __lowerCAmelCase = checkpoint["""out.0.bias"""]
    __lowerCAmelCase = checkpoint["""out.2.weight"""]
    __lowerCAmelCase = checkpoint["""out.2.bias"""]
    # Retrieves the keys for the input blocks only
    __lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
    __lowerCAmelCase = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(snake_case_ )
    }
    # Retrieves the keys for the middle blocks only
    __lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
    __lowerCAmelCase = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(snake_case_ )
    }
    # Retrieves the keys for the output blocks only
    __lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
    __lowerCAmelCase = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(snake_case_ )
    }
    # Down path: block 0 is the stem handled above, so start at 1.
    for i in range(1 , snake_case_ ):
        __lowerCAmelCase = (i - 1) // (config["""num_res_blocks"""] + 1)
        __lowerCAmelCase = (i - 1) % (config["""num_res_blocks"""] + 1)
        __lowerCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        __lowerCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        # Downsampler convs are copied directly rather than path-mapped.
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            __lowerCAmelCase = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            __lowerCAmelCase = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        __lowerCAmelCase = renew_resnet_paths(snake_case_ )
        __lowerCAmelCase = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        __lowerCAmelCase = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
        assign_to_checkpoint(
            snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ )
        # Any attention sublayer in this block gets its fused qkv tensors split.
        if len(snake_case_ ):
            __lowerCAmelCase = renew_attention_paths(snake_case_ )
            __lowerCAmelCase = {
                """old""": f"""input_blocks.{i}.1""",
                """new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            __lowerCAmelCase = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , )
    # Middle block: resnet / attention / resnet.
    __lowerCAmelCase = middle_blocks[0]
    __lowerCAmelCase = middle_blocks[1]
    __lowerCAmelCase = middle_blocks[2]
    __lowerCAmelCase = renew_resnet_paths(snake_case_ )
    assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
    __lowerCAmelCase = renew_resnet_paths(snake_case_ )
    assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
    __lowerCAmelCase = renew_attention_paths(snake_case_ )
    __lowerCAmelCase = {
        """middle_block.1.qkv.bias""": {
            """key""": """mid_block.attentions.0.key.bias""",
            """query""": """mid_block.attentions.0.query.bias""",
            """value""": """mid_block.attentions.0.value.bias""",
        },
        """middle_block.1.qkv.weight""": {
            """key""": """mid_block.attentions.0.key.weight""",
            """query""": """mid_block.attentions.0.query.weight""",
            """value""": """mid_block.attentions.0.value.weight""",
        },
    }
    assign_to_checkpoint(
        snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ )
    # Up path: group each output block's keys by sublayer and map them.
    for i in range(snake_case_ ):
        __lowerCAmelCase = i // (config["""num_res_blocks"""] + 1)
        __lowerCAmelCase = i % (config["""num_res_blocks"""] + 1)
        __lowerCAmelCase = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]]
        __lowerCAmelCase = {}
        for layer in output_block_layers:
            __lowerCAmelCase , __lowerCAmelCase = layer.split(""".""" )[0], shave_segments(snake_case_ , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(snake_case_ )
            else:
                __lowerCAmelCase = [layer_name]
        if len(snake_case_ ) > 1:
            __lowerCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            __lowerCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            __lowerCAmelCase = renew_resnet_paths(snake_case_ )
            __lowerCAmelCase = renew_resnet_paths(snake_case_ )
            __lowerCAmelCase = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
            # An upsampler conv is copied directly under its new name.
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                __lowerCAmelCase = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
                __lowerCAmelCase = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                __lowerCAmelCase = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(snake_case_ ) == 2:
                    __lowerCAmelCase = []
            if len(snake_case_ ):
                __lowerCAmelCase = renew_attention_paths(snake_case_ )
                __lowerCAmelCase = {
                    """old""": f"""output_blocks.{i}.1""",
                    """new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                __lowerCAmelCase = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case_ , )
        else:
            # Single-sublayer block: map the lone resnet's keys directly.
            __lowerCAmelCase = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                __lowerCAmelCase = """.""".join(["""output_blocks""", str(snake_case_ ), path["""old"""]] )
                __lowerCAmelCase = """.""".join(["""up_blocks""", str(snake_case_ ), """resnets""", str(snake_case_ ), path["""new"""]] )
                __lowerCAmelCase = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry: parse checkpoint/config/dump paths, convert the LDM
    # checkpoint to a diffusers UNet and save a pipeline (or just the model).
    # NOTE(review): every assignment below binds the obfuscated name `_A`
    # while later statements read `parser`, `args`, `checkpoint`, `config`,
    # `converted_checkpoint`, `model`, `scheduler`, `vqvae` and `pipe` —
    # names never bound here; `convert_ldm_checkpoint` is also undefined in
    # this file.  Presumably each `_A` was a distinct identifier before
    # obfuscation — confirm against the original conversion script.
    _A : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    _A : Tuple = parser.parse_args()
    _A : Optional[int] = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        _A : Optional[int] = json.loads(f.read())
    _A : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    _A : Dict = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
    # Best effort: build and save a full LDM pipeline; fall back to saving
    # just the UNet when scheduler/VAE files are absent.
    try:
        _A : Any = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        _A : Optional[Any] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        _A : List[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except: # noqa: E722
        model.save_pretrained(args.dump_path)
| 427 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class __snake_case (TaskTemplate ):
    """Task template describing an image-classification dataset.

    Fixes relative to the obfuscated original:
    - ``frozen=_a`` and base class ``_a`` referenced an undefined name; the
      base is ``TaskTemplate`` (imported above) and task templates are
      frozen dataclasses.
    - all five class attributes shared the name ``lowerCAmelCase__`` even
      though the methods read ``self.image_column`` / ``self.label_column``
      / ``self.label_schema`` — the conventional field names are restored.
    - both methods shared one name (the second silently clobbered the
      first); they are restored to the ``align_with_features`` /
      ``column_mapping`` API that ``TaskTemplate`` consumers call.
    - the ClassLabel check compared against the method's own parameter
      instead of ``ClassLabel`` (contradicting its own error message).
    """

    # Task identifier; always serialized, even when left at its default.
    task: ClassVar[str] = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    # Expected input/output feature schemas for this task.
    input_schema: ClassVar[Features] = Features({"image": Image()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    # Names of the dataset columns holding the image and the label.
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features( self , features ):
        """Return a copy of this template whose label schema uses the
        dataset's own ClassLabel feature for ``label_column``.

        :raises ValueError: if the label column is missing or is not a
            ``ClassLabel`` feature.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ deliberately.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 701 |
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase : int = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
    # Fix: the predicate defined above is named _UpperCAmelCase; the
    # original called the undefined name `perfect_cube` (NameError).
    print(_UpperCAmelCase(2_7))
    print(_UpperCAmelCase(4))
| 196 | 0 |
from __future__ import annotations
def __lowerCamelCase (UpperCAmelCase__ : list[int] , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE = i + 1
else:
SCREAMING_SNAKE_CASE = j - 1
return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the function defined above is named __lowerCamelCase; the
    # original f-string called the undefined name `two_pointer` (NameError).
    print(f"""{__lowerCamelCase([2, 7, 11, 15], 9) = }""")
| 403 | import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
# Make the shared test utilities importable before loading the custom
# feature-extractor fixture module below.
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# Directory holding the test fixture files (preprocessor configs etc.).
_lowerCamelCase : Union[str, Any] = get_tests_dir('''fixtures''')
class lowercase ( unittest.TestCase ):
    """Error-handling tests for feature-extractor downloads.

    NOTE(review): the method bodies bind everything to the obfuscated name
    ``SCREAMING_SNAKE_CASE`` (the mock's attribute setup is lost) and pass
    the undefined ``_UpperCamelCase`` to ``mock.patch`` — presumably the
    configured mock response object before obfuscation.
    """

    def __snake_case( self : str ) -> List[Any]:
        """A cached feature extractor must still load when the Hub answers
        with HTTP 500 (the fallback path must use the local cache)."""
        # Build a fake requests response: status 500, empty headers/body,
        # raising HTTPError on raise_for_status (bindings obfuscated).
        SCREAMING_SNAKE_CASE = mock.Mock()
        SCREAMING_SNAKE_CASE = 500
        SCREAMING_SNAKE_CASE = {}
        SCREAMING_SNAKE_CASE = HTTPError
        SCREAMING_SNAKE_CASE = {}
        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=_UpperCamelCase ) as mock_head:
            SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def __snake_case( self : List[str] ) -> Optional[int]:
        """Loading directly from a full config-file URL must work."""
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowercase ( unittest.TestCase ):
    """Staging-Hub round-trip tests for pushing/pulling feature extractors.

    NOTE(review): result bindings are obfuscated to ``SCREAMING_SNAKE_CASE``
    and the loaders are given ``_UpperCamelCase`` (undefined) where the
    local fixture directory was presumably passed before obfuscation.
    """

    @classmethod
    def __snake_case( cls : Optional[Any] ) -> Union[str, Any]:
        """Authenticate against the staging Hub once for the whole class."""
        SCREAMING_SNAKE_CASE = TOKEN
        HfFolder.save_token(_UpperCamelCase )

    @classmethod
    def __snake_case( cls : Dict ) -> List[str]:
        """Best-effort cleanup of the repos the tests may have created."""
        try:
            delete_repo(token=cls._token , repo_id="test-feature-extractor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
        except HTTPError:
            pass

    def __snake_case( self : Any ) -> Optional[int]:
        """Push to a user repo (both via push_to_hub and save_pretrained)
        and verify the re-downloaded extractor matches attribute-for-attribute."""
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
        feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                _UpperCamelCase , repo_id="test-feature-extractor" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )

    def __snake_case( self : Optional[int] ) -> Tuple:
        """Same round-trip as above but into an organization repo."""
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                _UpperCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )

    def __snake_case( self : List[str] ) -> Tuple:
        """A custom (dynamic) feature extractor must round-trip with its
        auto_map metadata and load back via AutoFeatureExtractor."""
        CustomFeatureExtractor.register_for_auto_class()
        SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
        SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
            F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_UpperCamelCase )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 403 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Import structure for the lazy module: submodule name -> exported symbols.
lowerCamelCase__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # sentencepiece is installed: expose the tokenizer lazily.
    # Fixes: the original rebound the whole dict to a bare list, and the
    # _LazyModule call below referenced an undefined `_import_structure`
    # (NameError on import).  The submodule key matches the relative import
    # in the TYPE_CHECKING branch below — NOTE(review): the symbol name
    # "GPTSw3Tokenizer" vs the type-checking import "GPTSwaTokenizer" looks
    # like an obfuscation artifact; confirm against the real module.
    lowerCamelCase__["tokenization_gpt_swa"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on
    # demand (the original built the proxy but never installed it).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowerCamelCase__, module_spec=__spec__)
| 202 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
# Emit INFO-level logs while converting the checkpoint.
logging.set_verbosity_info()
# NOTE(review): later code in this file calls `logger.info(...)` but the
# logger is bound to the obfuscated name `lowerCamelCase__` here — an
# obfuscation artifact; originally presumably `logger`.
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): the three parameters share one name — a SyntaxError as
# written; originally presumably (model, tf_checkpoint_path, config).
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
    """Load TensorFlow 2.x BERT checkpoint weights into a PyTorch BERT model.

    Two passes over the checkpoint: first list/filter the TF variables and
    read their arrays; then walk each variable's path, descend the matching
    PyTorch submodules, and copy the (reshaped/transposed) array in.

    NOTE(review): every intermediate result is rebound to the obfuscated
    name ``_UpperCamelCase`` while the body reads ``tf_path``, ``name``,
    ``names``, ``arrays``, ``layer_depth``, ``pointer``, ``trace``,
    ``layer_num``, ``array`` ... — none of which are bound under those names
    here.  Comments describe the evident intent; code left byte-identical.
    """
    _UpperCamelCase = os.path.abspath(lowerCAmelCase )
    logger.info(F'Converting TensorFlow checkpoint from {tf_path}' )
    # Load weights from TF model
    _UpperCamelCase = tf.train.list_variables(lowerCAmelCase )
    _UpperCamelCase = []
    _UpperCamelCase = []
    _UpperCamelCase = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        _UpperCamelCase = full_name.split("""/""" )
        # Skip bookkeeping variables that carry no model weights.
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(F'Skipping non-model layer {full_name}' )
            continue
        if "optimizer" in full_name:
            logger.info(F'Skipping optimization layer {full_name}' )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            _UpperCamelCase = name[1:]
        # figure out how many levels deep the name is
        _UpperCamelCase = 0
        for _name in name:
            if _name.startswith("""layer_with_weights""" ):
                depth += 1
            else:
                break
        layer_depth.append(lowerCAmelCase )
        # read data
        _UpperCamelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
        names.append("""/""".join(lowerCAmelCase ) )
        arrays.append(lowerCAmelCase )
    logger.info(F'Read a total of {len(lowerCAmelCase ):,} layers' )
    # Sanity check
    if len(set(lowerCAmelCase ) ) != 1:
        raise ValueError(F'Found layer names with different depths (layer depth {list(set(lowerCAmelCase ) )})' )
    _UpperCamelCase = list(set(lowerCAmelCase ) )[0]
    if layer_depth != 1:
        raise ValueError(
            """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
            """ heads.""" )
    # convert layers
    logger.info("""Converting weights...""" )
    for full_name, array in zip(lowerCAmelCase , lowerCAmelCase ):
        _UpperCamelCase = full_name.split("""/""" )
        _UpperCamelCase = model
        _UpperCamelCase = []
        # Walk the TF variable path segment by segment, descending into the
        # corresponding PyTorch submodule each time.
        for i, m_name in enumerate(lowerCAmelCase ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("""layer_with_weights""" ):
                _UpperCamelCase = int(m_name.split("""-""" )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["""embeddings""", """LayerNorm"""] )
                    _UpperCamelCase = getattr(lowerCAmelCase , """embeddings""" )
                    _UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
                    _UpperCamelCase = getattr(lowerCAmelCase , """encoder""" )
                    _UpperCamelCase = getattr(lowerCAmelCase , """layer""" )
                    _UpperCamelCase = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["""pooler""", """dense"""] )
                    _UpperCamelCase = getattr(lowerCAmelCase , """pooler""" )
                    _UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
            elif m_name == "embeddings":
                trace.append("""embeddings""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """embeddings""" )
                if layer_num == 0:
                    trace.append("""word_embeddings""" )
                    _UpperCamelCase = getattr(lowerCAmelCase , """word_embeddings""" )
                elif layer_num == 1:
                    trace.append("""position_embeddings""" )
                    _UpperCamelCase = getattr(lowerCAmelCase , """position_embeddings""" )
                elif layer_num == 2:
                    trace.append("""token_type_embeddings""" )
                    _UpperCamelCase = getattr(lowerCAmelCase , """token_type_embeddings""" )
                else:
                    raise ValueError(F'Unknown embedding layer with name {full_name}' )
                trace.append("""weight""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """weight""" )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["""attention""", """self"""] )
                _UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """self""" )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["""attention""", """output""", """LayerNorm"""] )
                _UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """output""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["""attention""", """output""", """dense"""] )
                _UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """output""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["""output""", """dense"""] )
                _UpperCamelCase = getattr(lowerCAmelCase , """output""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["""output""", """LayerNorm"""] )
                _UpperCamelCase = getattr(lowerCAmelCase , """output""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
            elif m_name == "_key_dense":
                # attention key
                trace.append("""key""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """key""" )
            elif m_name == "_query_dense":
                # attention query
                trace.append("""query""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """query""" )
            elif m_name == "_value_dense":
                # attention value
                trace.append("""value""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """value""" )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["""intermediate""", """dense"""] )
                _UpperCamelCase = getattr(lowerCAmelCase , """intermediate""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("""output""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """output""" )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("""bias""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """bias""" )
            elif m_name in ["kernel", "gamma"]:
                trace.append("""weight""" )
                _UpperCamelCase = getattr(lowerCAmelCase , """weight""" )
            else:
                logger.warning(F'Ignored {m_name}' )
        # for certain layers reshape is necessary
        _UpperCamelCase = """.""".join(lowerCAmelCase )
        if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowerCAmelCase ) or re.match(
            R"""(\S+)\.attention\.output\.dense\.weight""" , lowerCAmelCase ):
            _UpperCamelCase = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            _UpperCamelCase = array.transpose()
        if pointer.shape == array.shape:
            _UpperCamelCase = torch.from_numpy(lowerCAmelCase )
        else:
            raise ValueError(
                F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                F' {array.shape}' )
        logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' )
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path) -> None:
    """Convert a TensorFlow 2.x BERT checkpoint into a PyTorch ``state_dict`` on disk.

    Args:
        tf_checkpoint_path: Path to the TensorFlow 2.x checkpoint to read.
        config_path: Path to the BERT config JSON describing the architecture.
        pytorch_dump_path: Destination file for the converted PyTorch weights.

    Fixes over the original block: the three parameters all shared one name
    (``lowerCAmelCase`` — a SyntaxError), ``model`` was read without ever being
    bound, and the function name now matches its caller in the ``__main__`` guard.
    """
    logger.info(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...')
    # NOTE(review): argument order (model, config, checkpoint_path) follows the
    # upstream conversion script — confirm against the loader's signature above.
    load_tfa_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
lowerCamelCase__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 202 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): the next assignment reuses the same obfuscated name `__a`,
# so the logger binding is immediately clobbered by the URL map — confirm
# against the original file which names these `logger` and
# `IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
__a : Any = logging.get_logger(__name__)

# Maps pretrained I-BERT checkpoint identifiers to their hosted config files.
__a : Tuple = {
    """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
    """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
    """kssteven/ibert-roberta-large-mnli""": (
        """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
    ),
}
class IBertConfig(PretrainedConfig):
    """Configuration for an I-BERT model (integer-only BERT).

    Fixes over the original block: every ``__init__`` parameter shared one name
    (``SCREAMING_SNAKE_CASE`` — a SyntaxError), the base class ``snake_case__``
    was undefined (``PretrainedConfig`` is imported above), and the class name
    collided with the ONNX config defined next. Parameter names are recovered
    from the attribute assignments in the original body.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # I-BERT-specific switches: full integer quantization mode, and which
        # ops (if any) are forced back to floating point.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for I-BERT.

    Fixes over the original block: the base class name ``snake_case__`` was
    undefined (``OnnxConfig`` is imported above), and the property carried the
    mangled name ``__lowerCAmelCase`` instead of the ``inputs`` name the ONNX
    config interface reads.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis layout of the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 606 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
# Module logger (diffusers' logging wrapper imported above); the pylint pragma
# silences the invalid-name warning for the obfuscated binding name.
__a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase ( DiffusionPipeline ):
    """Segment an image region described by ``text`` with CLIPSeg, then inpaint that
    region with Stable Diffusion.

    Fixes over the original block: the nine constructor parameters (and all method
    parameters) shared a single name (``SCREAMING_SNAKE_CASE`` — a SyntaxError),
    the base class name ``snake_case__`` was undefined (``DiffusionPipeline`` is
    imported above), and all four methods shared the name ``__lowerCAmelCase`` so
    only the last definition survived. Real names are restored throughout.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        # Patch outdated scheduler configs in place (with a deprecation notice).
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in slices to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (unsliced) attention computation."""
        # Passing `None` re-enables one-step attention.
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU via accelerate, moving them to GPU on demand."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device on which the pipeline actually executes (hook-aware)."""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment ``text`` in ``image`` with CLIPSeg, then inpaint the mask from ``prompt``."""
        # 1. Predict a soft segmentation mask for `text` on `image`.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt" ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 606 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
# Fix: all five module constants were assigned to the single shadowed name
# `__lowercase`, while the tokenizer class below reads them as `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `SPIECE_UNDERLINE`
# (see the class-attribute assignments and `tokenize`/`save_vocabulary`).
logger = logging.get_logger(__name__)

# Name of the on-disk SentencePiece vocabulary file.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations of the vocabulary for each canonical T5 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

# Marker character SentencePiece uses for word boundaries.
SPIECE_UNDERLINE = "▁"
class TaTokenizer ( PreTrainedTokenizer ):
    """SentencePiece-based T5 tokenizer with ``<extra_id_*>`` sentinel tokens.

    Fixes over the original block: the class was renamed to ``TaTokenizer``,
    which is how the static helper below already refers to it; every method was
    named ``A_`` (so later definitions clobbered earlier ones); several
    signatures repeated the parameter name ``a`` (a SyntaxError); and the base
    class name ``lowercase__`` was undefined (``PreTrainedTokenizer`` is
    imported above).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        if legacy:
            logger.warning_once(
                f"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Reconcile the deprecated per-checkpoint max length with an explicitly requested one."""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    # NOTE(review): the warning category was obfuscated away; FutureWarning
                    # matches the upstream implementation — confirm.
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self) -> int:
        # SentencePiece pieces plus the appended <extra_id_*> sentinels.
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask marking special tokens in the (combined) sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self) -> List[str]:
        # Fix: the original tested `bool(re.search(...)) is not None`, which is
        # always True and therefore returned every additional special token.
        return list(
            set(filter(lambda x: re.search(r"<extra_id_\d+>", x) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Append the EOS id unless the sequence already ends with it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # T5 does not use token type ids: everything is segment 0.
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs) -> List[str]:
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        # Strip the artificial leading underline piece added in `tokenize`.
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; sentinels map to the top of the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str); ids past the SP vocab are sentinels."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into a string, decoding non-special runs with SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
from __future__ import annotations
def UpperCAmelCase(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place between indices ``start`` and ``end`` (inclusive)
    using the deliberately inefficient slowsort algorithm.

    Args:
        sequence: The list to sort; mutated in place.
        start: First index of the range (defaults to 0).
        end: Last index of the range (defaults to ``len(sequence) - 1``).

    Fixes over the original block: the three parameters all shared one name
    (a SyntaxError), the recursive calls targeted the undefined name
    ``slowsort``, and the "swap" assigned both values to a single local
    variable instead of exchanging the two list elements.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    # An empty or single-element range is already sorted.
    if start >= end:
        return
    mid = (start + end) // 2
    # Recursively sort both halves, ...
    UpperCAmelCase(sequence, start, mid)
    UpperCAmelCase(sequence, mid + 1, end)
    # ... move the larger of the two maxima to the end, ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    # ... then slowsort everything except that maximum.
    UpperCAmelCase(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod() | 26 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """Self-attention + (optional) cross-attention + feed-forward transformer block.

    Fixes over the original block: the constructor parameters all shared one
    name (``lowerCAmelCase_`` — a SyntaxError), the bodies referenced the
    undefined name ``snake_case__``, and the class carried the obfuscated name
    ``SCREAMING_SNAKE_CASE__`` although its own body already refers to
    ``AdaLayerNorm``/``AdaLayerNormZero``/``FeedForward`` by their real names.
    Parameter names follow the attribute assignments and call sites in the
    original body.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout: float = 0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        """Enable chunked feed-forward along dimension ``dim`` to save memory."""
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # 1. Self-attention (with the norm flavour selected at construction time).
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    """Feed-forward network: activation projection -> dropout -> linear projection.

    Fixes over the original block: duplicated parameter names (SyntaxError),
    bodies referencing the undefined ``snake_case__``, the obfuscated class
    name (the surrounding transformer block instantiates ``FeedForward``),
    a missing ``forward`` method name, and an ``if``/``if``/``elif`` chain
    normalized to a single ``elif`` chain (the conditions are mutually
    exclusive, so behavior is unchanged).
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """Linear projection followed by GELU, with an optional tanh approximation.

    Fixes over the original block: duplicated/obfuscated parameter names,
    bodies referencing the undefined ``snake_case__``, the obfuscated class
    name (``FeedForward`` instantiates ``GELU``), and method names restored to
    ``gelu``/``forward`` so the module is actually callable.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """Gated GELU: project to 2x width, then multiply one half by GELU of the other.

    Fixes over the original block: obfuscated class/method/parameter names
    (``FeedForward`` instantiates ``GEGLU``; the forward pass must be named
    ``forward`` for ``nn.Module`` to dispatch to it) and bodies referencing
    the undefined ``snake_case__``.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """Linear projection followed by the sigmoid approximation of GELU:
    ``x * sigmoid(1.702 * x)``.

    Fixes over the original block: obfuscated class/method/parameter names
    (``FeedForward`` instantiates ``ApproximateGELU``) and bodies referencing
    the undefined ``snake_case__``.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """Layer norm whose scale/shift are predicted from a timestep embedding.

    Fixes over the original block: obfuscated class/method/parameter names
    (``BasicTransformerBlock`` instantiates ``AdaLayerNorm``) and bodies
    referencing the undefined ``snake_case__``.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        # Affine parameters come from `linear`, so the norm itself has none.
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        # Chunk along dim 0, matching the original; for a scalar timestep this
        # splits the embedding vector into (scale, shift).
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """AdaLN-Zero: predicts shift/scale/gate triplets for attention and MLP
    from combined timestep + class-label embeddings.

    Fixes over the original block: obfuscated class/method/parameter names
    (``BasicTransformerBlock`` instantiates ``AdaLayerNormZero``) and bodies
    referencing the undefined ``snake_case__``.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        # Six modulation vectors: shift/scale/gate for MSA and for the MLP.
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """Group norm whose scale/shift are predicted from an embedding vector.

    Fixes over the original block: obfuscated class/method/parameter names and
    bodies referencing the undefined ``snake_case__``; methods restored to
    ``__init__``/``forward`` so the module is callable.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # Broadcast the per-channel modulation over the spatial dimensions.
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 567 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ :
    """Model-tester helper for the TF-ViT tests below: builds small ViT configs,
    dummy pixel inputs, and runs shape checks on the TF models.

    NOTE(review): identifiers look machine-mangled — every parameter is named
    ``snake_case__`` and every assignment binds ``UpperCAmelCase`` where
    ``self.<attr> = ...`` was clearly intended (later methods read
    ``self.batch_size``, ``self.image_size``, etc.). Confirm against the
    original source.
    """
    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , ) -> Optional[Any]:
        """Record the hyperparameters used to build configs and dummy inputs."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase = (image_size // patch_size) ** 2
        UpperCAmelCase = num_patches + 1
    def UpperCamelCase_ ( self ) -> List[Any]:
        """Build (config, pixel_values, labels) for one test run."""
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase = self.get_config()
        return config, pixel_values, labels
    def UpperCamelCase_ ( self ) -> Optional[int]:
        """Create a ViTConfig from the stored hyperparameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
    def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
        """Check TFViTModel output shapes, including interpolated position
        encodings for a non-default image size."""
        UpperCAmelCase = TFViTModel(config=snake_case__ )
        UpperCAmelCase = model(snake_case__ , training=snake_case__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase = self.image_size // 2
        UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ )
        UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
        """Check TFViTForImageClassification logits shapes, including resized
        inputs and single-channel (greyscale) images."""
        UpperCAmelCase = self.type_sequence_label_size
        UpperCAmelCase = TFViTForImageClassification(snake_case__ )
        UpperCAmelCase = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase = self.image_size // 2
        UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase = 1
        UpperCAmelCase = TFViTForImageClassification(snake_case__ )
        UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase = model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def UpperCamelCase_ ( self ) -> int:
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( a_ , a_ , unittest.TestCase ):
    """Common-test suite for the TF ViT models (model tester + pipeline mixin).

    NOTE(review): identifiers look machine-mangled — all class attributes are
    named ``_A``, locals are bound to ``UpperCAmelCase`` where ``self.<attr>``
    was clearly intended, and the base classes ``a_`` are unresolved here.
    Confirm against the original source.
    """
    # Model classes and pipeline mapping under test (empty when TF is absent).
    _A : Optional[int] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _A : Optional[Any] = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    # Feature flags for the common test machinery (presumably
    # test_head_masking / test_onnx / test_pruning or similar — mangled).
    _A : Optional[int] = False
    _A : Any = False
    _A : List[str] = False
    def UpperCamelCase_ ( self ) -> Dict:
        """Set up the model tester and the config tester."""
        UpperCAmelCase = TFViTModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
    def UpperCamelCase_ ( self ) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def UpperCamelCase_ ( self ) -> List[str]:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def UpperCamelCase_ ( self ) -> List[str]:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """Input embeddings are a Keras layer; output embeddings are absent."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(snake_case__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCAmelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) )
    def UpperCamelCase_ ( self ) -> List[str]:
        """The first positional argument of every model's call() is pixel_values."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(snake_case__ )
            UpperCAmelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , snake_case__ )
    def UpperCamelCase_ ( self ) -> Any:
        """Exercise the base-model shape checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )
    def UpperCamelCase_ ( self ) -> str:
        """Exercise the classification-head shape checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__ )
    @slow
    def UpperCamelCase_ ( self ) -> str:
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        UpperCAmelCase = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(snake_case__ )
def _lowerCAmelCase ( ):
    """Load and return the standard COCO cats test-fixture image used by the
    slow integration test below.

    Bug fix: the original bound the opened image to one (mangled) name and
    then returned a different, undefined name ``image``, raising NameError
    whenever the fixture was used.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    """Slow integration test: run the pretrained TF ViT classifier on the COCO
    fixture image and compare logits to reference values.

    NOTE(review): identifiers look machine-mangled — the cached property is
    read as ``self.default_image_processor`` although the method is named
    ``UpperCamelCase_``, and locals are bound to ``UpperCAmelCase`` throughout.
    Confirm against the original source.
    """
    @cached_property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """Pretrained image processor, or None when vision deps are missing."""
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
    @slow
    def UpperCamelCase_ ( self ) -> List[Any]:
        """End-to-end forward pass with logit-shape and logit-value checks."""
        UpperCAmelCase = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
        UpperCAmelCase = self.default_image_processor
        UpperCAmelCase = prepare_img()
        UpperCAmelCase = image_processor(images=snake_case__ , return_tensors="""tf""" )
        # forward pass
        UpperCAmelCase = model(**snake_case__ )
        # verify the logits
        UpperCAmelCase = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , snake_case__ )
        UpperCAmelCase = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
| 673 | 0 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = 1_0_0_0 ) -> int:
    """Return sum(2 * a * ((a - 1) // 2)) for a in [3, ``_lowercase``].

    Bug fix: the body referenced an undefined name ``n`` instead of the
    ``_lowercase`` parameter, so any call raised NameError.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , _lowercase + 1 ) )


if __name__ == "__main__":
    # Bug fix: the guard called an undefined ``solution()``; the solver
    # defined above is the only entry point in this script.
    print(__lowerCamelCase())
| 719 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# NOTE(review): both globals below are bound to the same mangled name ``a``;
# the formatter class reads ``logger`` and ``DEVICE_MAPPING``, so those were
# presumably the original names — confirm against the source. The second is a
# lazily-populated map from device string id to jaxlib Device (Device objects
# are not picklable, hence the module-level cache).
a : Any = get_logger()
a : Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    """Dataset formatter that converts Arrow rows/columns/batches into JAX
    arrays placed on a chosen device.

    NOTE(review): identifiers look machine-mangled — parameters are all named
    ``A``, locals are bound to ``UpperCAmelCase`` where ``self.<attr>`` was
    clearly intended (``self.device``, ``self.jnp_array_kwargs``), and dtype /
    config names such as ``jnp.intaa`` / ``jnp.floataa`` /
    ``jax.config.jax_enable_xaa`` have had their digits scrambled (presumably
    int64 / int32 / float32 / jax_enable_x64). Confirm against the original
    source.
    """
    def __init__( self , A=None , A=None , **A ) -> str:
        """Resolve the target device string and remember jnp.array kwargs."""
        super().__init__(features=A )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(A , A ):
            # Device objects are rejected: they cannot be pickled, so only the
            # string identifier is accepted and mapped back internally.
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` '''
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        UpperCAmelCase : Optional[int] = device if isinstance(A , A ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            UpperCAmelCase : Any = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            # Unknown identifier: warn and fall back to the default device.
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            UpperCAmelCase : List[Any] = str(jax.devices()[0] )
        UpperCAmelCase : Union[str, Any] = jnp_array_kwargs
    @staticmethod
    def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Map each available device's string id to its Device object."""
        import jax
        return {str(A ): device for device in jax.devices()}
    def _lowercase( self , A ) -> str:
        """Stack a homogeneous list of JAX arrays into one batched array;
        heterogeneous columns are returned as-is."""
        import jax
        import jax.numpy as jnp
        if isinstance(A , A ) and column:
            if all(
                isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(A , axis=0 )
        return column
    def _lowercase( self , A ) -> Tuple:
        """Convert one scalar/ndarray/PIL image to a jnp array on the target
        device, applying default dtypes and user-provided kwargs."""
        import jax
        import jax.numpy as jnp
        if isinstance(A , (str, bytes, type(A )) ):
            return value
        elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        UpperCAmelCase : List[str] = {}
        if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                UpperCAmelCase : str = {"""dtype""": jnp.intaa}
            else:
                UpperCAmelCase : int = {"""dtype""": jnp.intaa}
        elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            UpperCAmelCase : Any = {"""dtype""": jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(A , PIL.Image.Image ):
                UpperCAmelCase : List[str] = np.asarray(A )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            UpperCAmelCase : Dict = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
    def _lowercase( self , A ) -> Tuple:
        """Recursively tensorize one element, handling torch tensors,
        __array__-capable objects, and nested lists/objects arrays."""
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(A , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
            UpperCAmelCase : Optional[int] = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(A , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
        elif isinstance(A , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
        return self._tensorize(A )
    def _lowercase( self , A ) -> Dict:
        """Apply the recursive tensorization over a nested data structure."""
        return map_nested(self._recursive_tensorize , A , map_list=A )
    def _lowercase( self , A ) -> Mapping:
        """Format one Arrow row as a mapping of JAX arrays."""
        UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
        UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
        return self.recursive_tensorize(A )
    def _lowercase( self , A ) -> "jax.Array":
        """Format one Arrow column as a (possibly stacked) JAX array."""
        UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
        UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
        UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
        UpperCAmelCase : Any = self._consolidate(A )
        return column
    def _lowercase( self , A ) -> Mapping:
        """Format an Arrow batch, consolidating each column independently."""
        UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
        UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
        UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
        for column_name in batch:
            UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
        return batch
| 672 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = """▁"""
snake_case = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
snake_case = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
snake_case = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
snake_case = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class lowerCAmelCase ( UpperCamelCase_ ):
    """M2M100-style multilingual tokenizer backed by a SentencePiece model plus
    a JSON vocabulary, with per-language special tokens (``__en__`` etc.).

    NOTE(review): identifiers look machine-mangled — locals are bound to
    ``lowerCAmelCase__`` where ``self.<attr> = ...`` was clearly intended
    (methods later read ``self.encoder``, ``self.sp_model``,
    ``self.lang_token_to_id``, ...), parameters are renamed away from the names
    the bodies use, and the helpers called here (``load_json``, ``load_spm``,
    ``save_json``) are defined under different names below. Confirm against
    the original source.
    """
    A_ : List[Any] = VOCAB_FILES_NAMES
    A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    A_ : Any = ["""input_ids""", """attention_mask"""]
    # Prefix/suffix token-id lists maintained by the set_*_lang methods below.
    A_ : Optional[Any] = []
    A_ : Dict = []
    def __init__( self : Dict , a__ : List[str] , a__ : Optional[int] , a__ : Any=None , a__ : Any=None , a__ : Optional[Any]="<s>" , a__ : str="</s>" , a__ : Dict="</s>" , a__ : Optional[int]="<pad>" , a__ : Optional[Any]="<unk>" , a__ : Union[str, Any]="m2m100" , a__ : List[Any] = None , a__ : List[Any]=8 , **a__ : Dict , ):
        """Load the JSON vocab and SentencePiece model, register one
        ``__lang__`` additional special token per supported language, and
        initialize the source-language special tokens."""
        lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        lowerCAmelCase__ : List[str] = language_codes
        lowerCAmelCase__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
        lowerCAmelCase__ : List[str] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
        # Add every language token to additional_special_tokens (unless the
        # caller already provided it).
        lowerCAmelCase__ : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(__lowerCAmelCase )
            for lang_code in fairseq_language_code
            if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , )
        lowerCAmelCase__ : Optional[int] = vocab_file
        lowerCAmelCase__ : Union[str, Any] = load_json(__lowerCAmelCase )
        # decoder: id -> token, inverse of the encoder mapping.
        lowerCAmelCase__ : List[Any] = {v: k for k, v in self.encoder.items()}
        lowerCAmelCase__ : str = spm_file
        lowerCAmelCase__ : str = load_spm(__lowerCAmelCase , self.sp_model_kwargs )
        lowerCAmelCase__ : Dict = len(self.encoder )
        # Language tokens are appended after the base vocabulary.
        lowerCAmelCase__ : List[str] = {
            self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )
        }
        lowerCAmelCase__ : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )}
        lowerCAmelCase__ : Any = {v: k for k, v in self.lang_token_to_id.items()}
        lowerCAmelCase__ : Any = src_lang if src_lang is not None else "en"
        lowerCAmelCase__ : str = tgt_lang
        lowerCAmelCase__ : Tuple = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        lowerCAmelCase__ : Dict = num_madeup_words
    @property
    def _A ( self : List[Any] ):
        """Total vocabulary size: base vocab plus language tokens."""
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def _A ( self : int ):
        """Current source-language code."""
        return self._src_lang
    @src_lang.setter
    def _A ( self : Any , a__ : Union[str, Any] ):
        """Change the source language and refresh its special tokens."""
        lowerCAmelCase__ : Dict = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _A ( self : List[str] , a__ : Tuple ):
        """Tokenize text with the SentencePiece model."""
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
    def _A ( self : Any , a__ : Tuple ):
        """Token -> id, checking language tokens first, then the base vocab
        (falling back to the unknown token's id)."""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] )
    def _A ( self : Optional[Any] , a__ : Any ):
        """Id -> token, checking language-token ids first."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(__lowerCAmelCase , self.unk_token )
    def _A ( self : str , a__ : Dict ):
        """Join tokens back into a string, decoding special tokens verbatim
        rather than through SentencePiece."""
        lowerCAmelCase__ : Optional[int] = []
        lowerCAmelCase__ : int = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(__lowerCAmelCase ) + token
                lowerCAmelCase__ : Optional[Any] = []
            else:
                current_sub_tokens.append(__lowerCAmelCase )
        out_string += self.sp_model.decode(__lowerCAmelCase )
        return out_string.strip()
    def _A ( self : Any , a__ : str , a__ : Union[str, Any] = None , a__ : int = False ):
        """Return a 0/1 mask marking the special (prefix/suffix) positions in
        a sequence built by build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        lowerCAmelCase__ : int = [1] * len(self.prefix_tokens )
        lowerCAmelCase__ : Union[str, Any] = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
        return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
    def _A ( self : Union[str, Any] , a__ : int , a__ : Dict = None ):
        """Wrap ids as: lang-token prefix + sequence(s) + eos suffix."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def _A ( self : int ):
        """Return the full token -> id mapping, including added tokens."""
        lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Union[str, Any] ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowerCAmelCase__ : Union[str, Any] = self.__dict__.copy()
        lowerCAmelCase__ : int = None
        return state
    def __setstate__( self : List[str] , a__ : List[str] ):
        """Restore state and reload the SentencePiece model from disk."""
        lowerCAmelCase__ : List[Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCAmelCase__ : List[str] = {}
        lowerCAmelCase__ : Dict = load_spm(self.spm_file , self.sp_model_kwargs )
    def _A ( self : Tuple , a__ : Tuple , a__ : Dict = None ):
        """Write the JSON vocab and SentencePiece model into a directory and
        return both file paths."""
        lowerCAmelCase__ : List[Any] = Path(__lowerCAmelCase )
        if not save_dir.is_dir():
            raise OSError(F'''{save_directory} should be a directory''' )
        lowerCAmelCase__ : Any = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        lowerCAmelCase__ : Any = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , __lowerCAmelCase )
        # Copy the spm file when possible, otherwise serialize the in-memory model.
        if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , __lowerCAmelCase )
        elif not os.path.isfile(self.spm_file ):
            with open(__lowerCAmelCase , "wb" ) as fi:
                lowerCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        return (str(__lowerCAmelCase ), str(__lowerCAmelCase ))
    def _A ( self : Optional[Any] , a__ : str , a__ : Tuple = "en" , a__ : Tuple = None , a__ : Optional[Any] = "ro" , **a__ : Optional[Any] , ):
        """Record src/tgt languages, refresh source special tokens, then
        delegate batch preparation to the base class."""
        lowerCAmelCase__ : Dict = src_lang
        lowerCAmelCase__ : List[Any] = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
    def _A ( self : Optional[Any] , a__ : int , a__ : List[Any] , a__ : int , **a__ : List[str] ):
        """Build generation inputs for translation, attaching the target
        language id as forced_bos-style metadata."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        lowerCAmelCase__ : Union[str, Any] = src_lang
        lowerCAmelCase__ : Any = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
        lowerCAmelCase__ : Tuple = self.get_lang_id(__lowerCAmelCase )
        lowerCAmelCase__ : List[str] = tgt_lang_id
        return inputs
    def _A ( self : str ):
        """Switch to input (source-language) special tokens."""
        self.set_src_lang_special_tokens(self.src_lang )
    def _A ( self : Any ):
        """Switch to target-language special tokens."""
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def _A ( self : int , a__ : Optional[Any] ):
        """Set prefix to the source-language token id and suffix to eos."""
        lowerCAmelCase__ : Tuple = self.get_lang_token(__lowerCAmelCase )
        lowerCAmelCase__ : Optional[int] = self.lang_token_to_id[lang_token]
        lowerCAmelCase__ : Dict = [self.cur_lang_id]
        lowerCAmelCase__ : Dict = [self.eos_token_id]
    def _A ( self : Dict , a__ : Optional[int] ):
        """Set prefix to the target-language token id and suffix to eos."""
        lowerCAmelCase__ : int = self.get_lang_token(__lowerCAmelCase )
        lowerCAmelCase__ : List[Any] = self.lang_token_to_id[lang_token]
        lowerCAmelCase__ : Optional[Any] = [self.cur_lang_id]
        lowerCAmelCase__ : Dict = [self.eos_token_id]
    def _A ( self : int , a__ : Optional[int] ):
        """Language code -> ``__code__`` token string."""
        return self.lang_code_to_token[lang]
    def _A ( self : str , a__ : Optional[int] ):
        """Language code -> token id of its ``__code__`` token."""
        lowerCAmelCase__ : List[Any] = self.get_lang_token(__lowerCAmelCase )
        return self.lang_token_to_id[lang_token]
def UpperCAmelCase_ ( path , sp_model_kwargs ):
    """Load a SentencePiece processor from the model file at ``path``,
    forwarding ``sp_model_kwargs`` to the processor constructor.

    Bug fixes: the original signature declared the same parameter name twice
    (a SyntaxError at import time) and the body referenced an undefined
    ``__snake_case`` instead of its arguments.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Read the file at ``lowerCamelCase_`` and return its parsed JSON content.

    Bug fix: the body referenced an undefined ``__snake_case`` instead of the
    parameter, raising NameError on every call.
    """
    with open(lowerCamelCase_ , "r" ) as f:
        return json.load(f )
def UpperCAmelCase_ ( data , path ):
    """Serialize ``data`` to ``path`` as indented JSON.

    Bug fixes: the original signature declared the same parameter name twice
    (a SyntaxError at import time) and the body referenced an undefined
    ``__snake_case`` instead of its arguments. Parameter order (data, path)
    matches the in-file caller ``save_json(self.encoder, vocab_save_path)``.
    """
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
| 378 |
# Fixtures for auto-generated documentation notebooks: the (Italian-language)
# install cell injected at the top of each notebook, the default first cell
# list, and placeholder substitutions applied to doc examples.
# NOTE(review): all three constants are bound to the same mangled name ``_a``,
# and the second references ``INSTALL_CONTENT`` which has no visible binding —
# presumably these were INSTALL_CONTENT / notebook_first_cells / black_avoid_patterns
# before mangling; confirm against the original source.
_a = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 481 | 0 |
import os
def _A ( ):
    """Project Euler #13 style solver: return the first ten digits of the sum
    of the integers listed one per line in ``num.txt`` next to this script.

    Bug fixes: the original referenced an undefined ``__magic_name__`` three
    times — it is now ``__file__`` (to locate the data file), the joined path
    (to open it), and each ``line`` (to parse it) — and the main guard called
    an undefined ``solution()``.
    """
    file_path = os.path.join(os.path.dirname(__file__ ) , "num.txt" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]


if __name__ == "__main__":
    print(_A())
| 611 |
# (value, numeral) pairs in strictly descending order, including the
# subtractive forms (CM, CD, XC, XL, IX, IV), for greedy Roman-numeral
# construction.
# NOTE(review): the integer-to-Roman converter below iterates a name
# ``ROMAN`` that has no visible binding — this table was presumably named
# ROMAN before identifier mangling; confirm against the original source.
_snake_case = [
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def _A ( __magic_name__ ):
    """Convert a Roman numeral string to its integer value.

    Handles subtractive notation (IV, IX, XL, ...) by checking whether the
    current symbol is smaller than the next one.

    Bug fix: the body indexed an undefined name ``roman`` instead of the
    ``__magic_name__`` parameter, raising NameError on every call.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(__magic_name__ ):
        # Subtractive pair: a smaller value immediately before a larger one.
        if (place + 1 < len(__magic_name__ )) and (vals[__magic_name__[place]] < vals[__magic_name__[place + 1]]):
            total += vals[__magic_name__[place + 1]] - vals[__magic_name__[place]]
            place += 2
        else:
            total += vals[__magic_name__[place]]
            place += 1
    return total
return total
def _A ( __magic_name__ ):
    """Convert a positive integer to a Roman numeral string by greedily
    consuming the largest table values first.

    Bug fixes: the original unpacked ``divmod`` into a single repeated name
    and then read undefined ``factor``/``number``, and it iterated an
    undefined ``ROMAN``; the value table is now bound locally so the function
    is self-contained.
    """
    roman_table = [
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    ]
    result = []
    number = __magic_name__
    for arabic, roman in roman_table:
        # factor = how many copies of this numeral fit; number = remainder.
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 611 | 1 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def __UpperCamelCase (
    timesteps ,
    embedding_dim ,
    freq_shift = 1 ,
    min_timescale = 1 ,
    max_timescale = 1.0E4 ,
    flip_sin_to_cos = False ,
    scale = 1.0 ,
):
    """Build sinusoidal timestep embeddings (Transformer-style).

    Args:
        timesteps: 1-D array of timestep values, shape (batch,).
        embedding_dim: output embedding width; must be even.
        freq_shift: offset subtracted from the timescale count when spacing
            frequencies.
        min_timescale / max_timescale: geometric frequency range.
        flip_sin_to_cos: when True, concatenate [cos, sin] instead of [sin, cos].
        scale: multiplier applied to the raw phase values.

    Returns:
        jnp array of shape (batch, embedding_dim).

    Bug fix: every parameter in the original signature was declared with the
    same mangled name (a SyntaxError); the names were restored from the
    identifiers the body already reads. The arange dtype, mangled to
    ``jnp.floataa``, is written as float32 here.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class UpperCAmelCase ( nn.Module ):
    """Flax timestep-embedding MLP: Dense -> SiLU -> Dense.

    NOTE(review): identifiers look machine-mangled — both class attributes are
    named ``SCREAMING_SNAKE_CASE`` while __call__ reads ``self.time_embed_dim``
    and ``self.dtype`` (presumably the original names), ``jnp.floataa``'s
    digits are scrambled (presumably float32), and the locals the body binds
    (``lowercase__``) never define the ``temb`` it returns. Confirm against
    the original source.
    """
    SCREAMING_SNAKE_CASE = 3_2
    SCREAMING_SNAKE_CASE = jnp.floataa
    @nn.compact
    def __call__( self , __lowerCAmelCase ) -> Dict:
        """Project the sinusoidal embedding through two Dense layers with a
        SiLU nonlinearity in between."""
        lowercase__ : int = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(__lowerCAmelCase )
        lowercase__ : Any = nn.silu(__lowerCAmelCase )
        lowercase__ : Optional[int] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(__lowerCAmelCase )
        return temb
class UpperCAmelCase ( nn.Module ):
    """Flax module wrapping the sinusoidal timestep-embedding function.

    NOTE(review): identifiers look machine-mangled — all three class
    attributes are named ``SCREAMING_SNAKE_CASE`` while __call__ reads
    ``self.dim``, ``self.flip_sin_to_cos`` and ``self.freq_shift``, and the
    called ``get_sinusoidal_embeddings`` has no visible binding (the
    free function above was mangled to a different name). Confirm against
    the original source.
    """
    SCREAMING_SNAKE_CASE = 3_2
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = 1
    @nn.compact
    def __call__( self , __lowerCAmelCase ) -> Optional[Any]:
        """Return sinusoidal embeddings for the given timesteps."""
        return get_sinusoidal_embeddings(
            __lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 152 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase = 400_0000 ):
    """Project Euler #2: sum the even Fibonacci numbers not exceeding
    ``UpperCAmelCase`` (default four million).

    Bug fixes: the body compared against an undefined ``n`` instead of the
    parameter, and the main guard printed an undefined ``solution()``.
    """
    fib = [0, 1]
    i = 0
    # Grow the Fibonacci list until the next term would exceed the limit.
    while fib[i] <= UpperCAmelCase:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > UpperCAmelCase:
            break
        i += 1
    # Sum the even terms (the final appended term may exceed the limit and
    # is excluded by the range bound).
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{__UpperCamelCase() = }")
| 152 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class a ( __UpperCAmelCase ):
    """Video-classification pipeline: samples frames from a video (local path
    or URL) with decord, runs the image processor and model, and returns the
    top-k labels with scores.

    NOTE(review): identifiers look machine-mangled — the decorator argument
    and base class are an unresolved ``__UpperCAmelCase`` (presumably
    PIPELINE_INIT_ARGS and Pipeline), and locals are bound to
    ``__lowerCAmelCase`` where distinct names (or ``self.<attr>``) were
    clearly intended. Confirm against the original source.
    """
    def __init__( self : Optional[int] , *snake_case__ : List[Any] , **snake_case__ : int ):
        """Initialize the base pipeline, require decord, and restrict the
        model to the video-classification mapping."""
        super().__init__(*snake_case__ , **snake_case__ )
        requires_backends(self , "decord" )
        self.check_model_type(snake_case__ )
    def UpperCAmelCase__ ( self : Any , snake_case__ : str=None , snake_case__ : int=None , snake_case__ : List[str]=None ):
        """Split call-time kwargs into preprocess params (num_frames,
        frame_sampling_rate) and postprocess params (top_k)."""
        __lowerCAmelCase = {}
        if frame_sampling_rate is not None:
            __lowerCAmelCase = frame_sampling_rate
        if num_frames is not None:
            __lowerCAmelCase = num_frames
        __lowerCAmelCase = {}
        if top_k is not None:
            __lowerCAmelCase = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self : Dict , snake_case__ : Union[str, List[str]] , **snake_case__ : Dict ):
        """Classify one video or a list of videos."""
        return super().__call__(snake_case__ , **snake_case__ )
    def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any]=None , snake_case__ : Any=1 ):
        """Fetch/open the video, sample evenly spaced frames, and run the
        image processor to build model inputs."""
        if num_frames is None:
            __lowerCAmelCase = self.model.config.num_frames
        # Remote videos are downloaded into an in-memory buffer first.
        if video.startswith("http://" ) or video.startswith("https://" ):
            __lowerCAmelCase = BytesIO(requests.get(snake_case__ ).content )
        __lowerCAmelCase = VideoReader(snake_case__ )
        videoreader.seek(0 )
        # Evenly spaced frame indices across num_frames * sampling_rate.
        __lowerCAmelCase = 0
        __lowerCAmelCase = num_frames * frame_sampling_rate - 1
        __lowerCAmelCase = np.linspace(snake_case__ , snake_case__ , num=snake_case__ , dtype=np.intaa )
        __lowerCAmelCase = videoreader.get_batch(snake_case__ ).asnumpy()
        __lowerCAmelCase = list(snake_case__ )
        __lowerCAmelCase = self.image_processor(snake_case__ , return_tensors=self.framework )
        return model_inputs
    def UpperCAmelCase__ ( self : Any , snake_case__ : Any ):
        """Run the model forward pass."""
        __lowerCAmelCase = self.model(**snake_case__ )
        return model_outputs
    def UpperCAmelCase__ ( self : str , snake_case__ : Tuple , snake_case__ : str=5 ):
        """Softmax the logits and return the top-k (score, label) dicts."""
        if top_k > self.model.config.num_labels:
            __lowerCAmelCase = self.model.config.num_labels
        if self.framework == "pt":
            __lowerCAmelCase = model_outputs.logits.softmax(-1 )[0]
            __lowerCAmelCase , __lowerCAmelCase = probs.topk(snake_case__ )
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        __lowerCAmelCase = scores.tolist()
        __lowerCAmelCase = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case__ , snake_case__ )]
| 716 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and checkpoint->config-URL map.
# Fix: both values were bound to the same name, so the map clobbered the logger.
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class a ( PretrainedConfig ):
    """Configuration for DistilBERT models.

    Fixes: base class name was undefined (``PretrainedConfig`` is imported at
    the top of the file); the two class attributes shared one name so the
    second clobbered the first (``PretrainedConfig`` machinery reads
    ``model_type``/``attribute_map``); every ``__init__`` parameter shared one
    name, which is a SyntaxError.
    """

    model_type = 'distilbert'
    attribute_map = {
        'hidden_size': 'dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
    }

    def __init__( self , vocab_size=30_522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.0_2 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs ):
        """Store the DistilBERT hyper-parameters and forward the rest upstream."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class a ( OnnxConfig ):
    """ONNX export configuration for DistilBERT.

    Fixes: base class name was undefined (``OnnxConfig`` is imported at the
    top of the file); the local ``dynamic_axis`` was never bound; the property
    is named ``inputs`` as required by the ``OnnxConfig`` abstract interface.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 376 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
# Fix: all three constants were bound to one name, so only the last survived;
# the __main__ block below references DIALOGPT_MODELS.
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path , old_key='lm_head.decoder.weight' , new_key='lm_head.weight' , weights_name=None ):
    """Load a DialoGPT fine-tuned checkpoint, rename its LM-head key and save it.

    Fixes: duplicate parameter names (SyntaxError), the undefined local ``d``,
    the popped value being discarded instead of re-inserted under the new key,
    and the folder path being passed as ``exist_ok``.  The key names and the
    output file name are now overridable keyword arguments (backward-compatible
    defaults match the module constants).
    """
    if weights_name is None:
        weights_name = WEIGHTS_NAME  # transformers' canonical weights file name
    d = torch.load(checkpoint_path )
    d[new_key] = d.pop(old_key )  # rename the LM-head weight key in place
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , weights_name ) )
if __name__ == "__main__":
    # Convert every DialoGPT size found under --dialogpt_path.
    # Fix: the original assigned parser/paths to one clobbered name and then
    # referenced the undefined originals.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in ['small', 'medium', 'large']:
        checkpoint_path = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = f'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 9 |
# Undirected demo adjacency list used by the __main__ block below.
# Fix: the __main__ block references ``demo_graph``, which was never defined.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path( graph , start , goal ) -> list[str]:
    """Return one shortest path from ``start`` to ``goal`` as a node list.

    Uses breadth-first search over an adjacency-list ``graph``; returns ``[]``
    when no path exists.  Fixes: three parameters shared one name
    (SyntaxError) and every local was clobbered into a single placeholder.
    """
    explored = set()
    # Queue of partial paths still to be extended.
    queue = [[start]]
    if start == goal:
        return [start]
    while queue:
        path = queue.pop(0 )
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # Extend the current path by each unexplored neighbour.
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # BFS guarantees the first path reaching goal is shortest.
                if neighbour == goal:
                    return new_path
            explored.add(node )
    # No path between the two nodes.
    return []
def bfs_shortest_path_distance( graph , start , target ) -> int:
    """Return the hop count of the shortest path from ``start`` to ``target``.

    Returns ``-1`` for an empty graph, unknown endpoints, or no path; ``0``
    when start equals target.  Fixes the same mangled-signature/undefined-local
    defects as ``bfs_shortest_path``.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Distances from `start`; target starts at -1 (unreached).
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Demo run against the module-level ``demo_graph`` defined above.
    print(bfs_shortest_path(demo_graph, '''G''', '''D'''))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, '''G''', '''D'''))  # returns 4
| 9 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=os.environ.get('LOGLEVEL', 'INFO').upper(),
    stream=sys.stdout,
)
# Fix: logger/model_dict/tokenizer_dict were all bound to one name, so the
# functions below referenced undefined globals.
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    """Parse the export script's command-line arguments.

    Fix: the parser and parsed namespace were assigned to one clobbered
    placeholder while the code referenced ``parser``/``args``; the function is
    named ``parse_args`` as its caller in ``main`` expects.
    """
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
    parser.add_argument(
        "--validation_file" , type=str , default=None , help="A csv or a json file containing the validation data." )
    parser.add_argument(
        "--max_length" , type=int , default=5 , help="The maximum total input sequence length after tokenization." , )
    parser.add_argument(
        "--num_beams" , type=int , default=None , help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ) , )
    parser.add_argument(
        "--model_name_or_path" , type=str , help="Path to pretrained model or model identifier from huggingface.co/models." , required=True , )
    parser.add_argument(
        "--config_name" , type=str , default=None , help="Pretrained config name or path if not the same as model_name" , )
    parser.add_argument(
        "--device" , type=str , default="cpu" , help="Device where the model will be run" , )
    parser.add_argument("--output_file_path" , type=str , default=None , help="Where to store the final ONNX file." )
    args = parser.parse_args()
    return args
def load_model_tokenizer( model_name , device="cpu" ):
    """Instantiate the model and tokenizer for ``model_name`` on ``device``.

    For bart-base, generation knobs are reset so the exported beam search is
    deterministic.  Fixes: both parameters shared one name (SyntaxError) and
    the three config assignments were clobbered into one placeholder.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        # NOTE(review): restored targets follow the upstream export example
        # (no_repeat_ngram_size=0, forced_bos_token_id=None, min_length=0) —
        # the mangled original only shows the values 0 / None / 0.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model( model , tokenizer , onnx_file_path , num_beams , max_length ):
    """Export the BART beam-search generator to ONNX and validate it against PyTorch.

    Runs one sample article through ``model.generate``, exports the scripted
    beam-search wrapper, deduplicates initializers, then checks ONNX Runtime
    output matches PyTorch within tolerance.  Fixes: five parameters shared one
    name (SyntaxError) and every intermediate was clobbered into a placeholder.
    """
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors="pt" ).to(model.device )
        # Reference output from the eager model.
        summary_ids = model.generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            } , example_outputs=summary_ids , )
        logger.info("Model exported to {}".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        # None => return all graph outputs.
        ort_out = ort_sess.run(
            None , {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams ),
                "max_length": np.array(max_length ),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info("Model outputs from torch and ONNX Runtime are similar." )
        logger.info("Success." )
def main():
    """Drive the BART-to-ONNX export: parse args, load model, export, validate.

    Fixes: the function was anonymous-mangled while the guard calls ``main``;
    args/model/tokenizer/output locals were clobbered into one placeholder.
    """
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )


if __name__ == "__main__":
    main()
| 717 |
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian( matrix ):
    """Return True iff ``matrix`` equals its own conjugate transpose.

    Fix: the parameter was mangled while the body referenced ``matrix``; the
    name ``is_hermitian`` is what ``tests()`` below calls.
    """
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a , v ):
    """Return the Rayleigh quotient v* A v / (v* v) for matrix ``a`` and vector ``v``.

    Fix: both parameters shared one name (SyntaxError) and the two
    intermediates were clobbered into a placeholder.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests():
    """Self-checks: Rayleigh quotient on two Hermitian matrices.

    Fix: the function was anonymous-mangled while the ``__main__`` guard calls
    ``tests()``; locals were clobbered into one placeholder.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
    # Run doctests (none defined yet) and the explicit self-checks above.
    import doctest

    doctest.testmod()
    tests()
| 399 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Fix: all four module constants were bound to one name, while the classes
# below reference HEURISTIC, grid, delta and TPosition.
HEURISTIC = 0  # 1 for manhattan, 0 for euclidean
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """Search node: grid position, goal, accumulated g-cost and parent link.

    Fixes: the class was anonymous-mangled while the A* classes construct
    ``Node``; seven ``__init__`` parameters shared one name (SyntaxError); and
    the heuristic/comparison methods referenced undefined locals.
    """

    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , g_cost: int , parent: Node | None ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic( self ) -> float:
        """Manhattan or Euclidean distance to the goal, per HEURISTIC."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self , other: Node ) -> bool:
        # Ordering by f-cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``Node`` costs.

    Fixes: the class was anonymous-mangled while ``__main__`` and
    ``BidirectionalAStar`` use ``AStar``; every local was clobbered into one
    placeholder; method names restored from their call sites
    (``search``/``get_successors``/``retrace_path``).
    """

    def __init__( self , start: TPosition , goal: TPosition ) -> None:
        # Coordinates arrive as (y, x); Node takes (x, y, goal_x, goal_y, ...).
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search( self ) -> list[TPosition]:
        """Run A*; return the path to the target, or [start] when exhausted."""
        while self.open_nodes:
            # Open nodes are sorted using Node.__lt__ (f-cost).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # Keep whichever copy of the node has the better g-cost.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]

    def get_successors( self , parent: Node ) -> list[Node]:
        """In-bounds, non-obstacle neighbours of ``parent`` on the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node: Node | None ) -> list[TPosition]:
        """Walk parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two A* searches (forward and backward) meeting in the middle.

    Fixes: the class was anonymous-mangled while ``__main__`` constructs
    ``BidirectionalAStar``; locals/targets were clobbered into one
    placeholder; search/retrace method names restored from their call sites.
    """

    def __init__( self , start: TPosition , goal: TPosition ) -> None:
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False

    def search( self ) -> list[TPosition]:
        """Alternate forward/backward expansions until the frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            # Each direction now aims at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # Keep whichever copy of the node has the better g-cost.
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path( self , fwd_node: Node , bwd_node: Node ) -> list[TPosition]:
        """Join the forward path with the reversed backward path (no dup at the seam)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fix: init/goal/timers/searchers were all bound to one clobbered name.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")
    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 478 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Fix: both constants were bound to one name; training_function references
# MAX_GPU_BATCH_SIZE and the eval DataLoader uses EVAL_BATCH_SIZE.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 1_6 ):
    """Build GLUE/MRPC train and eval dataloaders with dynamic padding.

    Fixes: both parameters shared one name (SyntaxError); tokenizer/dataset
    locals and the DataLoader flags were clobbered into one placeholder.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only: swap the real dataloader builder for the mocked one.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Fix: the override was bound to a clobbered name instead of rebinding
    # get_dataloaders, so the mock never took effect.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """Train BERT on GLUE/MRPC with Accelerate, computing metrics correctly in DDP.

    Fixes: both parameters shared one name (SyntaxError); every hyperparameter,
    model/optimizer/scheduler and metric local was clobbered into one
    placeholder while later lines referenced the originals.
    """
    # For testing only: shrink the run when the mocked dataloaders are active.
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch['labels']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
def main():
    """Parse CLI flags and launch training with default MRPC hyperparameters.

    Fixes: the function was anonymous-mangled while the guard calls ``main``;
    parser/args/config were clobbered into one placeholder.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )


if __name__ == "__main__":
    main()
| 478 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Fix: both module globals were bound to one name, so the regex used by
# normalize_answer was clobbered by the OPTS placeholder.
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    """Parse the SQuAD v2.0 evaluation script's command-line arguments.

    Fix: parser was assigned to a clobbered placeholder while the code
    referenced ``parser``; named per the official evaluation script.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset ):
    """Map each question id to whether it has at least one gold answer text.

    Fix: the per-question result was assigned to a clobbered placeholder
    instead of the ``qid_to_has_ans`` dict entry.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def normalize_answer(s ):
    """Lower text and remove punctuation, articles and extra whitespace.

    Fix: the function was anonymous-mangled while siblings call
    ``normalize_answer``; the punctuation set was bound to a placeholder.
    """

    def remove_articles(text ):
        return ARTICLES_REGEX.sub(" " , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s ):
    """Return the whitespace tokens of the normalized answer ('' -> [])."""
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold , a_pred ):
    """Return 1 if the normalized gold and predicted answers match, else 0."""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold , a_pred ):
    """Token-level F1 between gold and predicted answers.

    Fix: gold/pred token lists, the token-overlap counter, and
    precision/recall were all clobbered into one placeholder.
    """
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset , preds ):
    """Per-question exact-match and F1 scores (max over gold answers).

    Fix: score-dict writes and the per-question locals were clobbered into a
    single placeholder, so nothing was ever recorded.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F'Missing prediction for {qid}' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    """Zero out scores where the model's no-answer probability exceeds the threshold.

    When predicted no-answer, the score becomes 1.0 iff the question really
    has no answer.  Fix: dict writes were clobbered into one placeholder.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores , fa_scores , qid_list=None ):
    """Aggregate exact/F1 scores into percentages, optionally over ``qid_list``.

    Fix: the ``total`` local was clobbered into a placeholder.
    """
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 1_00.0 * sum(exact_scores.values() ) / total),
                ("f1", 1_00.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def merge_eval(main_eval , new_eval , prefix ):
    """Copy every entry of ``new_eval`` into ``main_eval`` under ``prefix_<key>``.

    Fix: the write target was clobbered into a placeholder, so ``main_eval``
    was never updated.
    """
    for k in new_eval:
        main_eval[F'{prefix}_{k}'] = new_eval[k]
def plot_pr_curve(precisions , recalls , out_image , title ):
    """Save a step-style precision-recall curve to ``out_image``.

    NOTE(review): ``plt`` is not imported in this chunk — presumably
    matplotlib is imported elsewhere (the official script imports it lazily
    in main); verify before running standalone.
    """
    plt.step(recalls , precisions , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(recalls , precisions , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    """Average precision over questions sorted by ascending no-answer probability.

    Optionally plots the PR curve.  Fix: every running local (sorted qids,
    true-positive count, current p/r, the curves) was clobbered into one
    placeholder, and the sort key lambda shadowed its own argument.
    """
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 1_00.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR curves for exact match, F1 and an oracle, and merge them into ``main_eval``.

    Args:
        main_eval: metrics dict updated in place with pr_exact/pr_f1/pr_oracle entries.
        exact_raw: dict qid -> raw exact-match score (no no-answer threshold applied).
        f1_raw: dict qid -> raw F1 score.
        na_probs: dict qid -> no-answer probability.
        qid_to_has_ans: dict qid -> bool, whether a gold answer exists.
        out_image_dir: directory for the three PR-curve images; created if missing.

    Returns early (no-op) when no question has an answer, since recall is undefined.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: credit 1.0 exactly when the question truly has an answer.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Plot a normalized histogram of no-answer probabilities for a subset of questions.

    Args:
        na_probs: dict qid -> no-answer probability.
        qid_list: question ids to include; no-op when empty.
        image_dir: directory where ``na_prob_hist_<name>.png`` is written.
        name: label for the subset (e.g. "hasAns"/"noAns"), used in title and filename.

    Uses the module-level ``plt``, imported in the ``__main__`` guard.
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Equal weights summing to 1 turn the histogram into a proportion plot.
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer-probability threshold that maximizes the overall score.

    Sweeps questions in ascending no-answer probability, treating each step as
    "answer everything up to here, abstain afterwards", and tracks the best
    cumulative score seen.

    Args:
        preds: dict qid -> predicted answer string ("" means abstain).
        scores: dict qid -> per-question score (exact or F1).
        na_probs: dict qid -> no-answer probability.
        qid_to_has_ans: dict qid -> bool, whether a gold answer exists.

    Returns:
        ``(best_score, best_thresh)`` where best_score is a percentage of
        ``len(scores)`` and best_thresh the na_prob at which it is attained.
    """
    # Baseline: abstain on everything, scoring 1 for each no-answer question.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering a no-answer question loses the abstention point;
            # an empty prediction changes nothing.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Find best no-answer thresholds for exact match and F1 and record them.

    Mutates ``main_eval`` in place, adding best_exact/best_exact_thresh and
    best_f1/best_f1_thresh. Returns None.
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    """Run SQuAD v2.0-style evaluation using the globals parsed into ``OPTS``.

    Loads the dataset, predictions and optional no-answer probabilities,
    computes exact/F1 metrics overall and per HasAns/NoAns subset, optionally
    searches best no-answer thresholds and renders PR/histogram plots, then
    writes the metrics to ``OPTS.out_file`` or prints them as JSON.
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without a no-answer file, treat every question as answerable.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # main() and the plotting helpers read OPTS as a module-level global.
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily, and with a non-interactive backend so
        # plotting works on headless machines; it is only needed when an
        # output image directory was requested.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 313 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a MobileViT image processor in tests.

    Stores image-processor kwargs plus the batch/image dimensions used when
    generating random test inputs.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Fall back to the defaults the real processor would use.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``MobileViTImageProcessor`` over PIL, numpy and torch inputs."""

    # The mixin and the tests below read this attribute to build processors.
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`from_dict` honors defaults and kwarg overrides for size/crop_size."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL inputs are processed to tensors of the configured crop size."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        """Numpy inputs are processed to tensors of the configured crop size."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        """Torch inputs are processed to tensors of the configured crop size."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 313 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for seq2seq fine-tuning, extending ``TrainingArguments``.

    Adds label smoothing, sampler/generation toggles, optimizer choice and the
    dropout overrides that are copied into ``model.config`` by the trainer.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 163 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
# Module-level logger plus the filename under which PyTorch weights are saved
# inside each "best-checkpoint" directory.
logger = logging.getLogger(__name__)
MODEL_FILE_NAME = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Model-related arguments for the self-training loop."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Data-related arguments for the self-training loop."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Training/selection arguments for the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Build the pseudo-labeled training file for the next self-training iteration.

    Concatenates the unlabeled inputs with the model's predictions, optionally
    filters rows by prediction confidence and/or keeps only the top fraction
    implied by the validation result, maps predicted ids back to label names,
    shuffles, and writes ``train_pseudo.<ext>`` into ``next_data_dir``.

    Args:
        args: namespace with do_filter_by_confidence, confidence_threshold,
            do_filter_by_val_performance, seed and data_file_extension.
        infer_input: ``datasets.Dataset`` of unlabeled examples.
        infer_output: ``datasets.Dataset`` with "label", "prediction" and
            "probability" columns produced by inference.
        eval_result: validation metric in [0, 1]; when filtering by validation
            performance, that fraction of the highest-confidence rows is kept.
        id2label: mapping from predicted class id to label name.
        next_data_dir: directory the pseudo-labeled file is written into.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        # Keep the most confident predictions first.
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    # The model's "prediction" column becomes the new (pseudo) label.
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run iterative self-training for text classification.

    Each iteration fine-tunes the model (stage 1), optionally fine-tunes again
    on the original labeled data (stage 2), then pseudo-labels the unlabeled
    ``infer_file`` data for the next iteration. When an evaluation set is
    configured, early stopping tracks the best iteration.

    Args:
        model_name_or_path: model identifier or path for the initial model.
        train_file: csv/json file with the labeled training data.
        infer_file: csv/json file with the unlabeled data to pseudo-label.
        output_dir: directory per-iteration artifacts are written into.
        **kwargs: keys matching the ST*Arguments dataclasses override their
            defaults; any remaining keys are forwarded to ``finetune``.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    # Flatten all dataclass fields into one namespace, then let kwargs
    # override any field that already exists.
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", "pytorch_model.bin")
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", "pytorch_model.bin")
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 163 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Fallback stub so this module imports when PIL is unavailable.

        Tests that actually need vision are gated by ``@require_vision``, so
        the stub's ``open`` never has to return a real image.
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
__a : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowercase = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = object_detector(examples[0] , threshold=0.0 )
__lowercase = len(lowerCAmelCase__ )
self.assertGreater(lowerCAmelCase__ , 0 )
self.assertEqual(
lowerCAmelCase__ , [
{
'''score''': ANY(lowerCAmelCase__ ),
'''label''': ANY(lowerCAmelCase__ ),
'''box''': {'''xmin''': ANY(lowerCAmelCase__ ), '''ymin''': ANY(lowerCAmelCase__ ), '''xmax''': ANY(lowerCAmelCase__ ), '''ymax''': ANY(lowerCAmelCase__ )},
}
for i in range(lowerCAmelCase__ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
pass
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowercase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
] , )
__lowercase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
]
] , )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = pipeline('''zero-shot-object-detection''' )
__lowercase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
] , )
__lowercase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = 0.2
__lowercase = pipeline('''zero-shot-object-detection''' )
__lowercase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
] , )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = 2
__lowercase = pipeline('''zero-shot-object-detection''' )
__lowercase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
] , ) | 717 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCamelCase(unittest.TestCase):
    """Tests for `AlignProcessor`: save/load round-trips and delegation to the
    wrapped tokenizer and image processor.

    NOTE(review): in the obfuscated original every method shared the single name
    `_SCREAMING_SNAKE_CASE`, so the helpers referenced on `self` (get_tokenizer,
    prepare_image_inputs, ...) did not exist, `self.tmpdirname`/`self.vocab_file`
    were never assigned, and no test was collected. Conventional names restored.
    """

    def setUp(self):
        # Build a throwaway directory containing a minimal BERT vocab and an
        # EfficientNet image-processor config for from_pretrained() round-trips.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow BERT tokenizer built from the fixture vocab."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) BERT tokenizer built from the fixture vocab."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """EfficientNet image processor built from the fixture config."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        """Saving and reloading a processor preserves its tokenizer and image processor."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained reach the tokenizer and image processor."""
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        """`processor(images=...)` matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """`processor(text=...)` matches calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Passing text and images yields all model inputs; no input raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """`processor.batch_decode` delegates to `tokenizer.batch_decode`."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """The processor output keys match `processor.model_input_names`."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not number >= 1:
raise ValueError(
"""starting number must be
and integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
UpperCAmelCase : Union[str, Any] = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowerCamelCase__ )
# print(out)
number += 1
out += " "
return out
# Script entry point: run the module's doctests (if any) when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 160 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place queens row by row, appending every full solution to `boards`.

    NOTE: restored to the name `depth_first_search` that its own recursive call
    (and the caller below) already use; the obfuscated version declared five
    identically-named parameters, which is a SyntaxError.

    Args:
        possible_board: column index of the queen placed in each completed row.
        diagonal_right_collisions: occupied 45-degree diagonals (row - col).
        diagonal_left_collisions: occupied 135-degree diagonals (row + col).
        boards: accumulator for rendered solutions (mutated in place).
        n: board size.
    """
    # One entry per completed row, so the next row to fill is its length.
    row = len(possible_board)

    # A queen in every row: render the board like ['. Q . . ', '. . . Q ', ...].
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    for col in range(n):
        # Skip columns and diagonals already attacked by a placed queen:
        #   vertical: same column;  45-deg: row - col;  135-deg: row + col.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Place the queen and recurse into the next row.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem and print every solution plus a summary line.

    NOTE: restored to the name `n_queens_solution` used by the script entry
    point below; the obfuscated version shadowed the search function and called
    an undefined name.
    """
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print each board, one row per line, with a blank line between boards.
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
# Script entry point: run doctests, then print all solutions for a 4x4 board.
# NOTE(review): `n_queens_solution` is not defined under this name above in the
# current (obfuscated) file — verify the function name matches.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n_queens_solution(4)
| 135 | 0 |
import numpy as np
import datasets
_A : int = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_A : List[str] = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_A : Optional[int] = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a(datasets.Metric):
    """Mahalanobis-distance metric for the `datasets` evaluation framework.

    NOTE(review): the obfuscated original gave both methods one shared name and
    declared duplicate parameter names (a SyntaxError); they are restored to the
    `_info`/`_compute` hooks that `datasets.Metric` actually invokes.
    """

    def _info(self):
        """Describe the metric: docs, citation, and expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return the Mahalanobis distance of each row of `X` w.r.t. `reference_distribution`."""
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Both inputs must be 2D: (n_points, n_features).
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Center X on the reference distribution's mean.
        # NOTE(review): the scalar mean over all entries mirrors the original;
        # a per-feature mean (np.mean(reference_distribution, axis=0)) may be
        # what is actually intended — confirm before relying on the values.
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse for singular covariance matrices.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 700 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds small DistilBERT configs/inputs and checks each TF head's output shapes.

    NOTE(review): renamed from `a` to the name the test case below already
    references (`TFDistilBertModelTester(self)`); the obfuscated original also
    declared six identically-named parameters per method (a SyntaxError).
    """

    def __init__(self, parent):
        # `parent` is the unittest.TestCase that owns the assertions.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a tiny DistilBertConfig plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        # The model must also accept a positional [ids, mask] list.
        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        # Tile each example across the choice dimension: (batch, num_choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Shape the prepared inputs the way TFModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the TF DistilBERT model family.

    NOTE(review): the obfuscated original named every class attribute
    `UpperCAmelCase__` (so the mixins' `all_model_classes` /
    `pipeline_model_mapping` were never set) and every method
    `SCREAMING_SNAKE_CASE__` (so unittest collected nothing). Conventional
    names restored.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released distilbert-base-uncased weights.

    NOTE(review): renamed from `a`, which shadowed the previous class of the
    same name so only one of them was ever visible to test discovery.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 189 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads size metadata from README.md and/or
    the legacy dataset_infos.json, for every combination of present files.

    NOTE(review): the original declared two parameters both named `lowercase_`
    (a SyntaxError) and referenced an undefined `SCREAMING_SNAKE_CASE_`; the
    pytest fixture/parametrize names are restored.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    """A DatasetInfo written to a directory reloads equal and leaves a
    dataset_info.json behind.

    NOTE(review): restored from duplicate `lowercase_` parameters (SyntaxError)
    to the parametrize/fixture names pytest needs.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the _INCLUDED_INFO_IN_YAML fields and
    round-trips through yaml.safe_dump/safe_load.

    NOTE(review): the original's `-> Tuple` annotation referenced an unimported
    name (NameError at import time) and the body used the undefined
    `SCREAMING_SNAKE_CASE_`; both fixed.
    """
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty yaml dict.

    NOTE(review): the original's `-> List[Any]` annotation referenced an
    unimported name, raising NameError when the module was imported.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """A DatasetInfosDict written to a directory reloads equal (modulo fields
    the yaml representation drops) and leaves a README.md behind when non-empty.

    NOTE(review): restored from duplicate `lowercase_` parameters (SyntaxError)
    and undefined `SCREAMING_SNAKE_CASE_`/`_snake_case` references.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 326 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the MobileNetV2 model: names are only imported when
# first accessed, with vision/torch-specific symbols gated on their backends.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # image-processing symbols require the vision backend
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling symbols require the torch backend
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves the
    # names listed in _import_structure on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 413 | 0 |
"""simple docstring"""
import math
import sys
def __magic_name__ ( __snake_case : int ) -> int:
if number != int(__snake_case ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
lowercase : int = [-1] * (number + 1)
lowercase : List[str] = 0
for i in range(1 , number + 1 ):
lowercase : int = sys.maxsize
lowercase : Any = int(math.sqrt(__snake_case ) )
for j in range(1 , root + 1 ):
lowercase : Optional[int] = 1 + answers[i - (j**2)]
lowercase : Union[str, Any] = min(__snake_case , __snake_case )
lowercase : Tuple = answer
return answers[number]
if __name__ == "__main__":
    # Run the module's doctests when the file is executed directly.
    import doctest
    doctest.testmod()
| 702 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Windowing parameters for BigBird (4096-token context, 2048-token stride).
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
# "true" selects the training split; anything else selects validation.
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
# Integer labels written to disk for each answer category.
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def __magic_name__ ( __snake_case : Dict ) -> Optional[Any]:
def choose_first(__snake_case : Any , __snake_case : str=False ):
assert isinstance(__snake_case , __snake_case )
if len(__snake_case ) == 1:
lowercase : List[Any] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowercase : Any = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
lowercase : Any = {"id": example["id"]}
lowercase : List[str] = example["annotations"]
lowercase : Optional[int] = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowercase : Optional[int] = ["yes"] if 1 in yes_no_answer else ["no"]
lowercase : List[Any] = []
lowercase : Dict = []
lowercase : str = ["<cls>"]
else:
lowercase : int = ["short"]
lowercase : Optional[int] = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
lowercase : Dict = ["long"]
lowercase : Optional[int] = choose_first(annotation["long_answer"] , is_long_answer=__snake_case )
lowercase : int = []
answer.update(__snake_case )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
lowercase : str = True
else:
lowercase : List[str] = False
lowercase : Optional[Any] = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , __snake_case ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and shift the answer span to match.

    Returns ``{"context": str, "answer": {...}}`` where start/end token
    indices refer to the HTML-free context.  Yes/no answers are flagged with
    -100 (ignored by cross-entropy); missing answers with -1/"null".
    When ``assertion`` is set, the recomputed span text is compared with the
    original tokens and mismatches are printed for visual inspection.
    """
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # every HTML token removed before the answer shifts it one left
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question+context and split long contexts into strided windows.

    Each window is the question tokens followed by a ``max_length - q_len``
    slice of the document; consecutive windows overlap by ``doc_stride - q_len``
    tokens.  Answer token positions are remapped per window (-100 when the
    answer is not fully inside a window).  Returns a dict with example_id,
    input_ids (list of windows) and per-window labels.
    """
    # overlap will be of doc_stride - q_len

    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    # re-express the word-level span as token-level offsets
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        # This won't always match exactly because of extra gaps => visually inspect
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # answer fully inside this window: shift into window coordinates
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """``datasets.map``-compatible wrapper around :func:`get_strided_contexts_and_ans`."""
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append flattened (window, label) records to a JSON-lines file.

    Windows with no answer at all are skipped, and ~60% of the remaining
    "null"-category windows are randomly dropped to rebalance the dataset.
    """
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    # Preprocess the Natural Questions split selected by PROCESS_TRAIN into
    # strided BigBird training windows and dump them as JSON lines.
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 518 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LongformerTokenizer(Fast), adapted from the RoBERTa tests.

    Method names follow the TokenizerTesterMixin / unittest conventions
    (``setUp``, ``get_tokenizer``, ``test_*``) so the shared machinery picks
    them up.
    """

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        # NOTE(review): not collected by unittest (no test_ prefix) upstream either;
        # kept for manual comparison against the reference implementation.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 414 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names expected inside a PhoBERT checkpoint directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum input lengths (in tokens) of the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowerCAmelCase ( __a ):
snake_case : int = VOCAB_FILES_NAMES
snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , **lowerCAmelCase__ , ):
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : Union[str, Any] = vocab_file
_UpperCAmelCase : Optional[int] = merges_file
_UpperCAmelCase : Optional[int] = {}
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : Union[str, Any] = 1
_UpperCAmelCase : str = 2
_UpperCAmelCase : List[Any] = 3
self.add_from_file(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase : int = merges_handle.read().split("""\n""" )[:-1]
_UpperCAmelCase : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
_UpperCAmelCase : Optional[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
_UpperCAmelCase : Optional[int] = {}
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
_UpperCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
_UpperCAmelCase : List[Any] = [self.sep_token_id]
_UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ (self ):
return len(self.encoder )
def snake_case_ (self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ (self , lowerCAmelCase__ ):
if token in self.cache:
return self.cache[token]
_UpperCAmelCase : Tuple = tuple(lowerCAmelCase__ )
_UpperCAmelCase : Dict = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_UpperCAmelCase : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
_UpperCAmelCase : Optional[Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = bigram
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[str] = 0
while i < len(lowerCAmelCase__ ):
try:
_UpperCAmelCase : Tuple = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase : Optional[int] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase : Union[str, Any] = tuple(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
_UpperCAmelCase : List[Any] = get_pairs(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = """@@ """.join(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = word[:-4]
_UpperCAmelCase : Any = word
return word
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Dict = re.findall(r"""\S+\n?""" , lowerCAmelCase__ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase__ ).split(""" """ ) ) )
return split_tokens
def snake_case_ (self , lowerCAmelCase__ ):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def snake_case_ (self , lowerCAmelCase__ ):
return self.decoder.get(lowerCAmelCase__ , self.unk_token )
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : int = """ """.join(lowerCAmelCase__ ).replace("""@@ """ , """""" ).strip()
return out_string
    def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
        """Copy the tokenizer's vocabulary and merges files into a target
        directory, optionally prefixing the file names.

        NOTE(review): destination paths are bound to ``_UpperCAmelCase`` but
        the ``copyfile`` calls receive ``lowerCAmelCase__`` (the second
        argument), the error message reads ``save_directory``, the path
        expressions read ``filename_prefix``, and the return reads
        ``out_vocab_file``/``out_merge_file`` -- none of which are assigned
        here.  Rename artefacts to fix before use.
        """
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        _UpperCAmelCase : Tuple = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        _UpperCAmelCase : Optional[int] = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        # Only copy when the destination differs from the current file.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCAmelCase__ ):
            copyfile(self.merges_file , lowerCAmelCase__ )
        return out_vocab_file, out_merge_file
def snake_case_ (self , lowerCAmelCase__ ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
try:
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCAmelCase__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
_UpperCAmelCase : List[Any] = f.readlines()
for lineTmp in lines:
_UpperCAmelCase : Union[str, Any] = lineTmp.strip()
_UpperCAmelCase : int = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
_UpperCAmelCase : List[str] = line[:idx]
_UpperCAmelCase : Optional[int] = len(self.encoder )
| 414 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
    """Builds tiny OpenLlama configs and random inputs for the model tests.

    NOTE(review): this class looks machine-renamed -- ``__init__`` repeats
    the parameter name ``_lowerCAmelCase`` (a SyntaxError in Python), and many
    statements assign to ``__SCREAMING_SNAKE_CASE`` while later lines read
    the original variable names (``parent``, ``input_ids``,
    ``config_and_inputs`` ...).  The renaming must be reverted before this
    module can run.
    """

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
        """Store the hyper-parameters that size the tiny test model."""
        __SCREAMING_SNAKE_CASE: List[str] = parent
        __SCREAMING_SNAKE_CASE: Union[str, Any] = batch_size
        __SCREAMING_SNAKE_CASE: Union[str, Any] = seq_length
        __SCREAMING_SNAKE_CASE: List[str] = is_training
        __SCREAMING_SNAKE_CASE: Any = use_input_mask
        __SCREAMING_SNAKE_CASE: Optional[int] = use_token_type_ids
        __SCREAMING_SNAKE_CASE: Dict = use_labels
        __SCREAMING_SNAKE_CASE: List[Any] = vocab_size
        __SCREAMING_SNAKE_CASE: Tuple = hidden_size
        __SCREAMING_SNAKE_CASE: Optional[Any] = num_hidden_layers
        __SCREAMING_SNAKE_CASE: str = num_attention_heads
        __SCREAMING_SNAKE_CASE: Union[str, Any] = intermediate_size
        __SCREAMING_SNAKE_CASE: Optional[int] = hidden_act
        __SCREAMING_SNAKE_CASE: List[str] = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE: Optional[int] = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE: int = max_position_embeddings
        __SCREAMING_SNAKE_CASE: str = type_vocab_size
        __SCREAMING_SNAKE_CASE: Union[str, Any] = type_sequence_label_size
        __SCREAMING_SNAKE_CASE: str = initializer_range
        __SCREAMING_SNAKE_CASE: str = num_labels
        __SCREAMING_SNAKE_CASE: Any = num_choices
        __SCREAMING_SNAKE_CASE: Optional[Any] = scope

    def snake_case_ ( self ):
        """Build random ids, masks and labels plus a config for one pass."""
        __SCREAMING_SNAKE_CASE: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __SCREAMING_SNAKE_CASE: int = None
        if self.use_input_mask:
            __SCREAMING_SNAKE_CASE: Dict = random_attention_mask([self.batch_size, self.seq_length] )
        __SCREAMING_SNAKE_CASE: Dict = None
        if self.use_token_type_ids:
            __SCREAMING_SNAKE_CASE: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __SCREAMING_SNAKE_CASE: List[str] = None
        __SCREAMING_SNAKE_CASE: Optional[int] = None
        __SCREAMING_SNAKE_CASE: Dict = None
        if self.use_labels:
            __SCREAMING_SNAKE_CASE: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __SCREAMING_SNAKE_CASE: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __SCREAMING_SNAKE_CASE: Tuple = ids_tensor([self.batch_size] , self.num_choices )
        __SCREAMING_SNAKE_CASE: str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case_ ( self ):
        """Return an OpenLlamaConfig sized from the stored hyper-parameters."""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=_lowerCAmelCase , )

    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        """Run the bare OpenLlamaModel and check the hidden-state shape."""
        __SCREAMING_SNAKE_CASE: int = OpenLlamaModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __SCREAMING_SNAKE_CASE: int = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: str = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
        """Run the model as a cross-attending decoder and check shapes."""
        __SCREAMING_SNAKE_CASE: Tuple = True
        __SCREAMING_SNAKE_CASE: List[str] = OpenLlamaModel(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __SCREAMING_SNAKE_CASE: Tuple = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
        __SCREAMING_SNAKE_CASE: Optional[Any] = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
        __SCREAMING_SNAKE_CASE: Optional[int] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
        """Run the causal-LM head and check the logits shape."""
        __SCREAMING_SNAKE_CASE: str = OpenLlamaForCausalLM(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __SCREAMING_SNAKE_CASE: Optional[int] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
        """Check that decoding with a KV cache matches a full forward pass."""
        __SCREAMING_SNAKE_CASE: Optional[Any] = True
        __SCREAMING_SNAKE_CASE: Tuple = True
        __SCREAMING_SNAKE_CASE: Any = OpenLlamaForCausalLM(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        # first forward pass
        __SCREAMING_SNAKE_CASE: int = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
        __SCREAMING_SNAKE_CASE: int = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        __SCREAMING_SNAKE_CASE: Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __SCREAMING_SNAKE_CASE: List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        __SCREAMING_SNAKE_CASE: str = torch.cat([input_ids, next_tokens] , dim=-1 )
        __SCREAMING_SNAKE_CASE: int = torch.cat([input_mask, next_mask] , dim=-1 )
        __SCREAMING_SNAKE_CASE: List[Any] = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['''hidden_states'''][0]
        __SCREAMING_SNAKE_CASE: Tuple = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['''hidden_states'''][0]
        # select random slice
        __SCREAMING_SNAKE_CASE: Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __SCREAMING_SNAKE_CASE: Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        __SCREAMING_SNAKE_CASE: int = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )

    def snake_case_ ( self ):
        """Repackage prepared inputs into the dict form the common tests use."""
        __SCREAMING_SNAKE_CASE: Tuple = self.prepare_config_and_inputs()
        (
            __SCREAMING_SNAKE_CASE
        ): Optional[Any] = config_and_inputs
        __SCREAMING_SNAKE_CASE: List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class a ( __lowercase ,__lowercase ,__lowercase ,unittest.TestCase ):
    """Test suite for the OpenLlama model family (model, causal LM and
    sequence classification heads) built on the common test mixins.

    NOTE(review): machine-renamed file -- statements assign to
    ``__SCREAMING_SNAKE_CASE`` while later lines read pre-rename names
    (``config``, ``input_dict``, ``model`` ...); revert before running.
    """

    # Model classes exercised by the shared ModelTesterMixin machinery.
    SCREAMING_SNAKE_CASE__ : Tuple = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE__ : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    # Pipeline-task to model-class mapping for PipelineTesterMixin.
    SCREAMING_SNAKE_CASE__ : Any = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE__ : Optional[int] = False

    def snake_case_ ( self ):
        """Create the model tester and the shared config tester."""
        __SCREAMING_SNAKE_CASE: List[str] = OpenLlamaModelTester(self )
        __SCREAMING_SNAKE_CASE: Optional[int] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )

    def snake_case_ ( self ):
        """Run the generic configuration round-trip checks."""
        self.config_tester.run_common_tests()

    def snake_case_ ( self ):
        """Smoke-test a plain forward pass of the base model."""
        __SCREAMING_SNAKE_CASE: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def snake_case_ ( self ):
        """Exercise the model under each position-embedding variant."""
        __SCREAMING_SNAKE_CASE: List[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __SCREAMING_SNAKE_CASE: Any = type
            self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def snake_case_ ( self ):
        """Check regression-style sequence classification output shapes."""
        __SCREAMING_SNAKE_CASE: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __SCREAMING_SNAKE_CASE: Any = 3
        __SCREAMING_SNAKE_CASE: List[Any] = input_dict['''input_ids''']
        __SCREAMING_SNAKE_CASE: Union[str, Any] = input_ids.ne(1 ).to(_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        __SCREAMING_SNAKE_CASE: List[Any] = OpenLlamaForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __SCREAMING_SNAKE_CASE: List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def snake_case_ ( self ):
        """Check single-label sequence classification output shapes."""
        __SCREAMING_SNAKE_CASE: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __SCREAMING_SNAKE_CASE: List[Any] = 3
        __SCREAMING_SNAKE_CASE: List[Any] = '''single_label_classification'''
        __SCREAMING_SNAKE_CASE: Any = input_dict['''input_ids''']
        __SCREAMING_SNAKE_CASE: Optional[Any] = input_ids.ne(1 ).to(_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        __SCREAMING_SNAKE_CASE: str = OpenLlamaForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __SCREAMING_SNAKE_CASE: Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def snake_case_ ( self ):
        """Check multi-label sequence classification output shapes."""
        __SCREAMING_SNAKE_CASE: Any = self.model_tester.prepare_config_and_inputs_for_common()
        __SCREAMING_SNAKE_CASE: Any = 3
        __SCREAMING_SNAKE_CASE: str = '''multi_label_classification'''
        __SCREAMING_SNAKE_CASE: Dict = input_dict['''input_ids''']
        __SCREAMING_SNAKE_CASE: Union[str, Any] = input_ids.ne(1 ).to(_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: int = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        __SCREAMING_SNAKE_CASE: Dict = OpenLlamaForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __SCREAMING_SNAKE_CASE: List[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def snake_case_ ( self ):
        """Intentionally skipped; see the skip reason above."""
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def snake_case_ ( self , _lowerCAmelCase ):
        """Compare RoPE-scaled and unscaled models on short and long inputs."""
        __SCREAMING_SNAKE_CASE: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __SCREAMING_SNAKE_CASE: Tuple = ids_tensor([1, 10] , config.vocab_size )
        __SCREAMING_SNAKE_CASE: Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        __SCREAMING_SNAKE_CASE: Any = OpenLlamaModel(_lowerCAmelCase )
        original_model.to(_lowerCAmelCase )
        original_model.eval()
        __SCREAMING_SNAKE_CASE: str = original_model(_lowerCAmelCase ).last_hidden_state
        __SCREAMING_SNAKE_CASE: Optional[Any] = original_model(_lowerCAmelCase ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        __SCREAMING_SNAKE_CASE: List[str] = {'''type''': scaling_type, '''factor''': 10.0}
        __SCREAMING_SNAKE_CASE: Any = OpenLlamaModel(_lowerCAmelCase )
        scaled_model.to(_lowerCAmelCase )
        scaled_model.eval()
        __SCREAMING_SNAKE_CASE: Dict = scaled_model(_lowerCAmelCase ).last_hidden_state
        __SCREAMING_SNAKE_CASE: Any = scaled_model(_lowerCAmelCase ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
| 721 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a ( __lowercase ):
    """Scheduler tests for EulerDiscreteScheduler: config sweeps plus full
    denoising loops whose summed/mean outputs are checked against reference
    values.

    NOTE(review): machine-renamed file -- statements assign to
    ``__SCREAMING_SNAKE_CASE`` while later lines read pre-rename names
    (``config``, ``scheduler``, ``model``, ``sample`` ...); revert before
    running.
    """

    SCREAMING_SNAKE_CASE__ : List[Any] = (EulerDiscreteScheduler,)
    # Number of inference steps used by the full-loop tests.
    SCREAMING_SNAKE_CASE__ : Any = 10

    def snake_case_ ( self , **_lowerCAmelCase ):
        """Return a default scheduler config, updated with any overrides."""
        __SCREAMING_SNAKE_CASE: str = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**_lowerCAmelCase )
        return config

    def snake_case_ ( self ):
        """Sweep over several training-timestep counts."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_lowerCAmelCase )

    def snake_case_ ( self ):
        """Sweep over matched beta_start/beta_end pairs."""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )

    def snake_case_ ( self ):
        """Sweep over the supported beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_lowerCAmelCase )

    def snake_case_ ( self ):
        """Sweep over the supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_lowerCAmelCase )

    def snake_case_ ( self ):
        """Full denoising loop with default (epsilon) prediction."""
        __SCREAMING_SNAKE_CASE: str = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE: Optional[int] = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE: List[str] = scheduler_class(**_lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        __SCREAMING_SNAKE_CASE: Dict = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_model()
        __SCREAMING_SNAKE_CASE: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
        __SCREAMING_SNAKE_CASE: Optional[int] = sample.to(_lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            __SCREAMING_SNAKE_CASE: Any = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: int = output.prev_sample
        __SCREAMING_SNAKE_CASE: Tuple = torch.sum(torch.abs(_lowerCAmelCase ) )
        __SCREAMING_SNAKE_CASE: List[Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
        # Reference statistics for the deterministic fixed-seed loop.
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def snake_case_ ( self ):
        """Full denoising loop with v-prediction."""
        __SCREAMING_SNAKE_CASE: Any = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
        __SCREAMING_SNAKE_CASE: str = scheduler_class(**_lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        __SCREAMING_SNAKE_CASE: str = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE: Optional[int] = self.dummy_model()
        __SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __SCREAMING_SNAKE_CASE: Any = sample.to(_lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            __SCREAMING_SNAKE_CASE: Tuple = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Union[str, Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Optional[int] = output.prev_sample
        __SCREAMING_SNAKE_CASE: List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
        __SCREAMING_SNAKE_CASE: Any = torch.mean(torch.abs(_lowerCAmelCase ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3

    def snake_case_ ( self ):
        """Full denoising loop with timesteps placed on the target device."""
        __SCREAMING_SNAKE_CASE: Any = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE: Tuple = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE: List[str] = scheduler_class(**_lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: List[str] = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_model()
        __SCREAMING_SNAKE_CASE: int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        __SCREAMING_SNAKE_CASE: Any = sample.to(_lowerCAmelCase )
        for t in scheduler.timesteps:
            __SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Optional[int] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: int = output.prev_sample
        __SCREAMING_SNAKE_CASE: List[str] = torch.sum(torch.abs(_lowerCAmelCase ) )
        __SCREAMING_SNAKE_CASE: Dict = torch.mean(torch.abs(_lowerCAmelCase ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def snake_case_ ( self ):
        """Full denoising loop with Karras sigma spacing enabled."""
        __SCREAMING_SNAKE_CASE: int = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE: str = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE: Union[str, Any] = scheduler_class(**_lowerCAmelCase , use_karras_sigmas=_lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: int = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE: Any = self.dummy_model()
        __SCREAMING_SNAKE_CASE: str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        __SCREAMING_SNAKE_CASE: Optional[int] = sample.to(_lowerCAmelCase )
        for t in scheduler.timesteps:
            __SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: List[Any] = model(_lowerCAmelCase , _lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: Any = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
            __SCREAMING_SNAKE_CASE: List[str] = output.prev_sample
        __SCREAMING_SNAKE_CASE: int = torch.sum(torch.abs(_lowerCAmelCase ) )
        __SCREAMING_SNAKE_CASE: Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
        assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 146 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for TextStreamer/TextIteratorStreamer: streamed generation must
    reproduce the non-streamed greedy decode, honour skip_prompt /
    decode_kwargs, and raise on iterator timeout.

    NOTE(review): machine-renamed file -- statements assign to
    ``__lowerCamelCase`` while later lines read pre-rename names
    (``tokenizer``, ``model``, ``input_ids``, ``streamer`` ...), and call
    sites pass the undefined name ``__UpperCAmelCase``; revert before
    running.
    """

    def lowerCamelCase ( self ):
        """TextStreamer output on stdout must match the plain greedy decode."""
        __lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        __lowerCamelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__UpperCAmelCase )
        __lowerCamelCase = -1
        __lowerCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
        __lowerCamelCase = model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase )
        __lowerCamelCase = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            __lowerCamelCase = TextStreamer(__UpperCAmelCase )
            model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase , streamer=__UpperCAmelCase )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        __lowerCamelCase = cs.out[:-1]
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def lowerCamelCase ( self ):
        """TextIteratorStreamer consumed from a thread must match greedy."""
        __lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        __lowerCamelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__UpperCAmelCase )
        __lowerCamelCase = -1
        __lowerCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
        __lowerCamelCase = model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase )
        __lowerCamelCase = tokenizer.decode(greedy_ids[0] )
        __lowerCamelCase = TextIteratorStreamer(__UpperCAmelCase )
        __lowerCamelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        __lowerCamelCase = Thread(target=model.generate , kwargs=__UpperCAmelCase )
        thread.start()
        __lowerCamelCase = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def lowerCamelCase ( self ):
        """skip_prompt=True must stream only the newly generated suffix."""
        __lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        __lowerCamelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__UpperCAmelCase )
        __lowerCamelCase = -1
        __lowerCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
        __lowerCamelCase = model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase )
        __lowerCamelCase = greedy_ids[:, input_ids.shape[1] :]
        __lowerCamelCase = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            __lowerCamelCase = TextStreamer(__UpperCAmelCase , skip_prompt=__UpperCAmelCase )
            model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase , streamer=__UpperCAmelCase )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        __lowerCamelCase = cs.out[:-1]
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def lowerCamelCase ( self ):
        """decode_kwargs (skip_special_tokens) must be forwarded to decode."""
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        __lowerCamelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
        __lowerCamelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__UpperCAmelCase )
        __lowerCamelCase = -1
        __lowerCamelCase = torch.ones((1, 5) , device=__UpperCAmelCase ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            __lowerCamelCase = TextStreamer(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
            model.generate(__UpperCAmelCase , max_new_tokens=1 , do_sample=__UpperCAmelCase , streamer=__UpperCAmelCase )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        __lowerCamelCase = cs.out[:-1]  # Remove the final "\n"
        __lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def lowerCamelCase ( self ):
        """A too-small iterator timeout must surface as an exception."""
        __lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        __lowerCamelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__UpperCAmelCase )
        __lowerCamelCase = -1
        __lowerCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
        __lowerCamelCase = TextIteratorStreamer(__UpperCAmelCase , timeout=0.001 )
        __lowerCamelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        __lowerCamelCase = Thread(target=model.generate , kwargs=__UpperCAmelCase )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(__UpperCAmelCase ):
            __lowerCamelCase = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 175 |
def a__ ( _UpperCamelCase : int ) -> str:
    """Return the binary digits of a non-negative integer as a string.

    Works recursively: strip the least-significant bit with divmod and
    prepend the representation of the remaining quotient.

    Args:
        _UpperCamelCase: a non-negative int (or int-convertible value).

    Returns:
        The binary representation without any "0b" prefix, e.g. 5 -> "101".
    """

    def _to_binary(value: int) -> str:
        # Base cases terminate the recursion; 0 and 1 are their own digits.
        if value in (0, 1):
            return str(value)
        quotient, remainder = divmod(value, 2)
        return _to_binary(quotient) + str(remainder)

    # A nested helper keeps the recursion immune to the module-level name
    # being shadowed (the original body called the undefined name
    # ``binary_recursive`` and recursed on the unmodified argument, which
    # could never terminate).
    return _to_binary(int(_UpperCamelCase))


# Restore the name the sibling formatter in this module calls.
binary_recursive = a__
def a__ ( _UpperCamelCase : str ) -> str:
    """Convert an integer-like string (optionally signed, with surrounding
    whitespace) to a "0b"-prefixed binary string, e.g. "-10" -> "-0b1010".

    Args:
        _UpperCamelCase: the value to convert; it is stringified first.

    Returns:
        The signed binary representation with a "0b" prefix.

    Raises:
        ValueError: if the input is empty or is not an integer literal.
    """
    # The original body read the undefined names ``number``/``negative`` and
    # called the undefined ``binary_recursive`` -- fixed to be self-contained.
    number = str(_UpperCamelCase).strip()
    if not number:
        raise ValueError('''No input value was provided''')
    negative = '''-''' if number.startswith('''-''') else ''''''
    number = number.lstrip('''-''')
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''')
    # format(..., "b") yields the binary digits without Python's own prefix.
    return F"""{negative}0b{format(int(number), 'b')}"""
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    from doctest import testmod

    testmod()
| 175 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCamelCase ):
    """Configuration class for X-MOD models (XLM-R-style encoder with
    per-language adapter modules).

    NOTE(review): machine-renamed file -- ``__init__`` repeats the parameter
    name ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) and the assignments read
    pre-rename names (``vocab_size`` ...); revert before use.
    """

    # Model-type identifier used by the auto classes.
    lowerCAmelCase__ = '''xmod'''

    def __init__( self , __SCREAMING_SNAKE_CASE=3_0522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=("en_XX",) , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
        """Store encoder hyper-parameters plus the X-MOD adapter settings."""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        snake_case__ : int =vocab_size
        snake_case__ : List[str] =hidden_size
        snake_case__ : Optional[Any] =num_hidden_layers
        snake_case__ : Union[str, Any] =num_attention_heads
        snake_case__ : Tuple =hidden_act
        snake_case__ : str =intermediate_size
        snake_case__ : Dict =hidden_dropout_prob
        snake_case__ : int =attention_probs_dropout_prob
        snake_case__ : Dict =max_position_embeddings
        snake_case__ : Dict =type_vocab_size
        snake_case__ : Optional[int] =initializer_range
        snake_case__ : Tuple =layer_norm_eps
        snake_case__ : str =position_embedding_type
        snake_case__ : List[str] =use_cache
        snake_case__ : Optional[Any] =classifier_dropout
        # Adapter-specific options below.
        snake_case__ : Tuple =pre_norm
        snake_case__ : Dict =adapter_reduction_factor
        snake_case__ : int =adapter_layer_norm
        snake_case__ : str =adapter_reuse_layer_norm
        snake_case__ : Any =ln_before_adapter
        snake_case__ : Any =list(__SCREAMING_SNAKE_CASE )
        snake_case__ : Dict =default_language
class _lowerCAmelCase ( __UpperCamelCase ):
    """ONNX export configuration: declares the model's dynamic input axes.

    NOTE(review): the property binds the axis mapping to ``snake_case__``
    but then reads the undefined name ``dynamic_axis`` -- rename artefact;
    raises NameError as written.
    """

    @property
    def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axes mapping for input_ids / attention_mask;
        multiple-choice tasks get an extra "choice" axis."""
        if self.task == "multiple-choice":
            snake_case__ : Union[str, Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            snake_case__ : Tuple ={0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 703 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Checks that an optimizer wrapped by ``Accelerator.prepare`` remains
    picklable.

    NOTE(review): machine-renamed file -- every statement assigns to
    ``snake_case__`` yet later lines read ``model``/``accelerator`` and pass
    the undefined ``__SCREAMING_SNAKE_CASE``; revert before running.
    """

    def UpperCAmelCase ( self ) -> Any:
        """Round-trip the prepared optimizer through pickle and reset state."""
        snake_case__ : int =torch.nn.Linear(10 , 10 )
        snake_case__ : int =torch.optim.SGD(model.parameters() , 0.1 )
        snake_case__ : str =Accelerator()
        snake_case__ : Any =accelerator.prepare(__SCREAMING_SNAKE_CASE )
        try:
            pickle.loads(pickle.dumps(__SCREAMING_SNAKE_CASE ) )
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
        # Reset the shared singleton so later tests start from a clean state.
        AcceleratorState._reset_state()
| 408 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _A ( __lowercase , __lowercase=False ):
"""simple docstring"""
try:
lowerCamelCase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCamelCase__ = default
else:
# KEY is set, convert it to True or False.
try:
lowerCamelCase__ = strtobool(__lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
# NOTE(review): this block was scrambled — every constant below is bound to the
# same name `__magic_name__`, so each assignment clobbers the previous one, and
# `parse_flag_from_env` is the function defined above under the scrambled name
# `_A`.  The originals were presumably `_run_slow_tests`, `_run_remote_tests`,
# `_run_local_tests`, `_run_packaged_tests` and the pytest skip-marks
# `require_lz4`, `require_py7zr`, etc. — TODO restore the real names.
__magic_name__ = parse_flag_from_env("""RUN_SLOW""", default=False)
__magic_name__ = parse_flag_from_env("""RUN_REMOTE""", default=False)
__magic_name__ = parse_flag_from_env("""RUN_LOCAL""", default=True)
__magic_name__ = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
__magic_name__ = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
__magic_name__ = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
__magic_name__ = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def _A ( __lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
lowerCamelCase__ = unittest.skip("""test requires faiss""" )(__lowercase )
return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
lowerCamelCase__ = unittest.skip("""test requires regex""" )(__lowercase )
return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
lowerCamelCase__ = unittest.skip("""test requires elasticsearch""" )(__lowercase )
return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
lowerCamelCase__ = unittest.skip("""test requires sqlalchemy""" )(__lowercase )
return test_case
def _A(test_case):
    """Skip *test_case* unless PyTorch is available (per datasets.config).

    BUG FIX (this and the three decorators below): the scrambled source
    assigned the wrapped test to a throwaway local and returned the undefined
    name ``test_case``.
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""")(test_case)
    return test_case


def _A(test_case):
    """Skip *test_case* unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""")(test_case)
    return test_case


def _A(test_case):
    """Skip *test_case* unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""")(test_case)
    return test_case


def _A(test_case):
    """Skip *test_case* unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""")(test_case)
    return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__lowercase )
else:
return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__lowercase )
else:
return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__lowercase )
else:
return test_case
def _A ( __lowercase ):
"""simple docstring"""
def _require_spacy_model(__lowercase ):
try:
import spacy # noqa F401
spacy.load(__lowercase )
except ImportError:
return unittest.skip("""test requires spacy""" )(__lowercase )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__lowercase ) )(__lowercase )
else:
return test_case
return _require_spacy_model
def _A ( __lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__lowercase )
else:
return test_case
def _A ( __lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__lowercase )
else:
return test_case
def _A(test_case):
    """Skip *test_case* unless slow tests are enabled via RUN_SLOW.

    BUG FIX (this and the three decorators below): the scrambled source
    assigned the wrapped test to a throwaway local and returned the undefined
    name ``test_case``.
    NOTE(review): `_run_slow_tests` & friends are the module flags that the
    constants block above should define — their definitions were scrambled
    to `__magic_name__`; confirm once the constants are restored.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""")(test_case)
    return test_case


def _A(test_case):
    """Skip *test_case* unless local tests are enabled via RUN_LOCAL."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""")(test_case)
    return test_case


def _A(test_case):
    """Skip *test_case* unless packaged tests are enabled via RUN_PACKAGED."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""")(test_case)
    return test_case


def _A(test_case):
    """Skip *test_case* unless remote tests are enabled via RUN_REMOTE."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""")(test_case)
    return test_case
def _A ( *__lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__lowercase ) and name.startswith("""test""" ):
for decorator in decorators:
lowerCamelCase__ = decorator(__lowercase )
setattr(cls , __lowercase , __lowercase )
return cls
return decorate
# NOTE(review): marker exception raised by the offline simulator below when a
# request would hang forever (presumably `RequestWouldHangIndefinitelyError`).
# The base-class name `_SCREAMING_SNAKE_CASE` is not defined in this module —
# likely a scrambled `Exception`; TODO confirm the intended base.
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
    pass
# NOTE(review): three-member enum of offline-simulation modes.  Call sites
# below use OfflineSimulationMode.CONNECTION_FAILS / CONNECTION_TIMES_OUT /
# HF_DATASETS_OFFLINE_SET_TO_1, but here the class name and all three member
# names were scrambled — the members even shadow each other, so only the last
# assignment survives.  TODO restore the real names.
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
    snake_case = 0
    snake_case = 1
    snake_case = 2
@contextmanager
def _A(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the block.

    Three modes: fail every connection, time every request out, or flip the
    HF_DATASETS_OFFLINE config flag.

    NOTE(review): reconstructed — the scrambled source declared duplicate
    parameter names (a SyntaxError) throughout.  `OfflineSimulationMode` and
    `RequestWouldHangIndefinitelyError` refer to the (scrambled) classes
    defined above; confirm once their names are restored.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = """https://10.255.255.1"""
        if kwargs.get("""timeout""") is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""", f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("""Offline mode is enabled.""", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""", True):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""")
@contextmanager
def _A ( *__lowercase , **__lowercase ):
"""simple docstring"""
lowerCamelCase__ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir:
try:
os.chdir(__lowercase )
yield
finally:
os.chdir(__lowercase )
@contextmanager
def _A():
    """Assert that Arrow's allocated memory grows across the wrapped block."""
    import gc

    gc.collect()
    baseline = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - baseline > 0, "Arrow memory didn't increase."
@contextmanager
def _A():
    """Assert that Arrow's allocated memory does not grow across the wrapped block."""
    import gc

    gc.collect()
    baseline = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - baseline <= 0, "Arrow memory wasn't expected to increase."
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist()
def _A(func):
    """Decorator: xfail a test when the server answers 500/502; re-raise otherwise.

    NOTE(review): reconstructed — the scrambled inner wrapper declared
    duplicate parameter names (a SyntaxError).
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("""500""") or str(err).startswith("""502"""):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class SCREAMING_SNAKE_CASE__:
    """Result of a finished subprocess: return code plus captured output lines.

    BUG FIX: the scrambled source declared all three __init__ parameters with
    the same name (a SyntaxError) and assigned them to throwaway locals, so
    no attribute was ever set on the instance.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _A ( __lowercase , __lowercase ):
"""simple docstring"""
while True:
lowerCamelCase__ = await stream.readline()
if line:
callback(__lowercase )
else:
break
async def _A(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run *cmd* as a subprocess, teeing stdout/stderr live, and return the result.

    NOTE(review): reconstructed — the scrambled source had duplicate
    parameter names in ``tee`` (a SyntaxError) and referenced undefined
    names (`p`, `cmd`, `_read_stream`, `_RunOutput`).  The stream-reader
    helper is inlined because every sibling function in this module was
    scrambled to the same name and cannot be referenced reliably; the result
    class is the (scrambled) `SCREAMING_SNAKE_CASE__` defined above.
    """
    if echo:
        print("""\nRunning: """, """ """.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("""utf-8""").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    async def _drain(stream, callback):
        # inlined line reader (see NOTE above)
        while True:
            line = await stream.readline()
            if not line:
                break
            callback(line)

    # wrap in tasks: asyncio.wait() rejects bare coroutines on Python >= 3.11
    await asyncio.wait(
        [
            asyncio.create_task(_drain(p.stdout, lambda l: tee(l, out, sys.stdout, label="""stdout:"""))),
            asyncio.create_task(_drain(p.stderr, lambda l: tee(l, err, sys.stderr, label="""stderr:"""))),
        ],
        timeout=timeout,
    )
    return SCREAMING_SNAKE_CASE__(await p.wait(), out, err)
# NOTE(review): synchronous wrapper that runs a command through the async
# subprocess streamer above and raises on failure or empty output.  Left
# byte-identical because the scrambled source is too damaged to restyle
# safely: all six parameters share one name (a SyntaxError), every result is
# bound to the same throwaway local, and `_stream_subprocess`, `result`,
# `cmd_str` and `stderr` are undefined under the current (scrambled) names.
# `asyncio.get_event_loop()` is also deprecated outside a running loop —
# TODO migrate to asyncio.run once the names are restored.
def _A ( __lowercase , __lowercase=None , __lowercase=None , __lowercase=180 , __lowercase=False , __lowercase=True ):
    """Run *cmd* to completion, raising RuntimeError on failure or silence."""
    lowerCamelCase__ = asyncio.get_event_loop()
    lowerCamelCase__ = loop.run_until_complete(
        _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) )
    lowerCamelCase__ = """ """.join(__lowercase )
    if result.returncode > 0:
        lowerCamelCase__ = """\n""".join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
    return result
def _A ( ):
"""simple docstring"""
lowerCamelCase__ = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
lowerCamelCase__ = re.sub(r"""^gw""" , """""" , __lowercase , 0 , re.M )
return int(__lowercase )
def _A ( ):
"""simple docstring"""
lowerCamelCase__ = 2_9500
lowerCamelCase__ = pytest_xdist_worker_id()
return port + uniq_delta
| 129 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _A ( __lowercase , __lowercase , __lowercase = None ):
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
lowerCamelCase__ = quote(__lowercase )
return hfh.hf_hub_url(__lowercase , __lowercase , repo_type="""dataset""" , revision=__lowercase )
| 129 | 1 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds image-processor kwargs and computes the output shapes the processor should produce.

    NOTE(review): the class was scrambled to `UpperCamelCase__` (and then
    clobbered by a later class of the same scrambled name); the test class
    below instantiates it as `BridgeTowerImageProcessingTester`, so that name
    is restored here, as are the method names its call sites use.
    """

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        # BUG FIX: the scrambled source assigned every value to a throwaway
        # local instead of `self`, leaving the instance without attributes.
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"""shortest_edge""": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """kwargs dict used to construct the BridgeTowerImageProcessor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resize and divisor snapping.

        Mirrors the processor's rule: scale the short side to `size`, cap the
        long side at int(1333/800 * size), round, then floor both dimensions to
        a multiple of `size_divisor`.  For batched inputs, returns the per-axis
        maximum over the batch.
        """
        if not batched:
            size = self.size["""shortest_edge"""]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
# NOTE(review): test-suite for BridgeTowerImageProcessor.  Left byte-identical
# because the scrambled code is heavily cross-referential: the base mixin name
# `lowerCAmelCase_` is undefined (presumably ImageProcessingSavingTestMixin,
# imported above), every method was renamed to the same scrambled identifier,
# and bodies bind results to throwaway locals while reading the real names
# (`image_inputs`, `encoded_images`, `expected_height`, ...).  TODO restore.
@require_torch
@require_vision
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
    '''Image-processing tests for BridgeTowerImageProcessor (PIL/np/torch inputs).'''
    __snake_case : Optional[int] = BridgeTowerImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
        '''Create the shared tester fixture (should be stored on self).'''
        SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self )
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
        '''Processor kwargs supplied by the tester fixture.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
        '''The processor exposes all configuration attributes.'''
        SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase__ ,"""image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ ,"""image_std""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ ,"""do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ ,"""do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ ,"""size""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ ,"""size_divisor""" ) )
    def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
        '''Intentionally skipped for this processor.'''
        pass
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
        '''PIL inputs: output pixel_values match the tester's expected shapes.'''
        SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase__ ,Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase__ ,batched=lowerCamelCase__ )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
        '''NumPy inputs: output pixel_values match the tester's expected shapes.'''
        SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,numpify=lowerCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase__ ,np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase__ ,batched=lowerCamelCase__ )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
        '''Torch-tensor inputs: output pixel_values match the tester's expected shapes.'''
        SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase__ ,torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase__ ,batched=lowerCamelCase__ )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
| 718 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
# NOTE(review): diffusers VQModel test-suite.  Left byte-identical because the
# scrambling makes a safe restyle impossible: the mixin bases `lowerCAmelCase_`
# are undefined (presumably ModelTesterMixin, UNetTesterMixin imported above),
# the class attributes were both renamed to `__snake_case` (the second clobbers
# the first — originally model_class / main_input_name), and method bodies
# bind to throwaway locals while reading the real names (`batch_size`,
# `num_channels`, `sizes`, `image`, `model`, `loading_info`, `output`, ...).
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Unit tests for the VQModel autoencoder.'''
    __snake_case : str = VQModel
    __snake_case : Optional[Any] = "sample"
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[Any]=(32, 32) ) -> Optional[int]:
        '''Random input batch for the model (should return {"sample": tensor}).'''
        SCREAMING_SNAKE_CASE = 4
        SCREAMING_SNAKE_CASE = 3
        SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
        return {"sample": image}
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
        '''Expected input shape (C, H, W).'''
        return (3, 32, 32)
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
        '''Expected output shape (C, H, W).'''
        return (3, 32, 32)
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
        '''Init kwargs plus a matching input batch for the common model tests.'''
        SCREAMING_SNAKE_CASE = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        SCREAMING_SNAKE_CASE = self.dummy_input
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
        '''Intentionally skipped for this model.'''
        pass
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
        '''Intentionally skipped for this model.'''
        pass
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
        '''Loading from the Hub reports no missing keys and yields usable output.'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = VQModel.from_pretrained("""fusing/vqgan-dummy""" ,output_loading_info=lowerCamelCase__ )
        self.assertIsNotNone(lowerCamelCase__ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
        model.to(lowerCamelCase__ )
        SCREAMING_SNAKE_CASE = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
        '''Pretrained model reproduces a known output slice on a seeded input.'''
        SCREAMING_SNAKE_CASE = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
        model.to(lowerCamelCase__ ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        SCREAMING_SNAKE_CASE = torch.randn(1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size )
        SCREAMING_SNAKE_CASE = image.to(lowerCamelCase__ )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ).sample
        SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) )
| 116 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants were scrambled to the same name, so the
# pretrained-config map clobbers the logger — originally `logger` and
# `UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP` (or similar).  TODO restore.
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
    """microsoft/unispeech-sat-base-100h-libri-ft""": (
        """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration class for UniSpeechSat models.

    NOTE(review): reconstructed — the scrambled source inherited from the
    undefined name `_A` (PretrainedConfig is what this module imports),
    declared every __init__ parameter with the same name (a SyntaxError),
    and assigned every value to a throwaway local instead of `self`.
    """

    # conventional transformers registry key — scrambled attribute was `a__`;
    # confirm downstream usage once callers are restored.
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        """Build the configuration; validates that the conv layer specs agree in length."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # downsampling factor of the conv feature extractor; the scrambled
        # property name was `snake_case_` — `inputs_to_logits_ratio` is the
        # conventional transformers name, confirm against callers.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 2 |
import itertools
import math


def is_prime(number: int) -> bool:
    """Primality by trial division over 6k±1 candidates up to sqrt(number).

    BUG FIX: all three functions in this script were scrambled to the same
    name while their call sites used the real names (`is_prime`,
    `prime_generator`, `solution`); the real names are restored.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order, starting from 2."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the *nth* prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'{solution() = }')
| 2 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    r"""
    Image processor for BLIP-style models.

    Pipeline (each step optional): convert to RGB, resize to a fixed
    ``{"height", "width"}`` size, rescale pixel values (typically by 1/255),
    and normalize with the OpenAI CLIP mean/std.
    """

    # Name of the tensor this processor produces; consumed by the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default target size is 384x384 as used by BLIP checkpoints.
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``(size["height"], size["width"])``.

        Raises:
            ValueError: if ``size`` lacks the ``height``/``width`` keys.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        # Delegates to the module-level `resize` from image_transforms.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (typically ``1/255``)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline over one image or a list of images.

        Per-call arguments override the defaults captured in ``__init__``.
        Returns a :class:`BatchFeature` holding ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        # BUGFIX: the original `do_resize and size is None or resample is None`
        # parsed as `(do_resize and size is None) or (resample is None)`;
        # the intended check is "if resizing, both size and resample are set".
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
# --- (removed dataset-concatenation artifact) ---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OwlViT model family: heavy submodules are
# only imported when one of their names is actually accessed (via _LazyModule).
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-dependent entries are only registered when PIL & friends are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-dependent modeling entries.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- (removed dataset-concatenation artifact) ---
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    """Partition ``a[left_index:right_index]`` around pivot ``a[left_index]``.

    Lomuto-style scheme: after the call every element smaller than the pivot
    sits left of it. Returns the pivot's final index.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Move the pivot between the two partitions.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a, left, right):
    """Sort ``a[left:right]`` in place with a uniformly random pivot choice."""
    if left < right:
        # Swap a random element into the pivot slot to defeat adversarial inputs.
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = _partition_around_first(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recurse left of the pivot
        quick_sort_random(a, pivot_index + 1, right)  # recurse right of the pivot


def _partition_around_first(a, left, right):
    """Partition ``a[left:right]`` around ``a[left]``; return the pivot's index."""
    pivot = a[left]
    i = left + 1
    for j in range(left + 1, right):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left] = a[left], a[i - 1]
    return i - 1
def main():
    """Read comma-separated integers from stdin, sort them, print the result."""
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
# --- (removed dataset-concatenation artifact) ---
import warnings
from ..trainer import Trainer
from ..utils import logging
__A : Any = logging.get_logger(__name__)
class A_ (Trainer ):
    """Deprecated SageMaker trainer shim.

    Kept only for backward compatibility: it emits a deprecation warning and
    otherwise behaves exactly like ``Trainer``.
    """

    def __init__( self , args=None , **kwargs ):
        '''Warn that this class is deprecated, then defer to ``Trainer``.'''
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''' ,
            FutureWarning ,  # BUGFIX: category must be a Warning subclass, not the args object
        )
        super().__init__(args=args , **kwargs )
'''simple docstring'''
from math import factorial, radians
def _snake_case ( lowercase , lowercase = 1_8 , lowercase = 1_0 ) -> float:
__a : Union[str, Any] = angle_in_degrees - ((angle_in_degrees // 3_6_0.0) * 3_6_0.0)
# Converting from degrees to radians
__a : Optional[Any] = radians(lowercase )
__a : Union[str, Any] = angle_in_radians
__a : Optional[int] = 3
__a : Optional[int] = -1
for _ in range(lowercase ):
result += (b * (angle_in_radians**a)) / factorial(lowercase )
__a : Tuple = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase , lowercase )
if __name__ == "__main__":
__import__('doctest').testmod() | 697 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( AbstractFileSystem ):
    """Legacy read-only fsspec filesystem over a Hugging Face Hub dataset repo.

    File listings come from ``repo_info.siblings``; opening a file streams it
    from the Hub via ``hf_hub_url``. Sizes are not populated (see TODO below).
    """

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        """Store the repo metadata and auth token; the dir cache is built lazily."""
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        """Populate ``self.dir_cache`` (path -> file/directory entry) once."""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                # Register every ancestor directory of the file (except the root).
                self.dir_cache.update(
                    {
                        str(d): {"""name""": str(d), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        """Open ``path`` for reading by streaming it from the Hub."""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"""trust_env""": True},
        ).open()

    def info(self, path, **kwargs):
        """Return the cache entry for ``path``; raise if it is unknown."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of ``path`` (entries or just names)."""
        self._get_dirs()
        path = PurePosixPath(path.strip("""/"""))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/"""))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out)
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap used by Prim's algorithm.

    ``heap``/``positions`` are passed into each operation as parallel lists of
    keys and vertex ids; ``self.node_position[v]`` tracks where vertex ``v``
    currently sits inside the heap so decrease-key is O(log n).
    """

    def __init__(self):
        # node_position[v] is the current heap index of vertex v.
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertex ids, then fix the position index of both.
            temp, temp_position = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_position
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift ``val`` (logically placed at ``index``) up to its proper slot."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val becomes the new minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over ``heap``/``positions``."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex id with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel sinks to the bottom during the sift
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


# Backward-compatible alias for the machine-mangled original class name.
_a = Heap
def prisms_algorithm(adjacency_list):
    """Compute a minimum spanning tree with Prim's algorithm.

    Args:
        adjacency_list: mapping/list where ``adjacency_list[v]`` is a list of
            ``[neighbor, weight]`` pairs; vertices are ``0..n-1``.

    Returns:
        List of MST edges as ``(parent_vertex, vertex)`` tuples.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    # Seed the frontier with vertex 0's neighbours.
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    # Decrease-key: tighter connection to the partial tree found.
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read the edge list interactively and print the MST edges.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        # Undirected graph: register the edge in both directions.
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
# --- (removed dataset-concatenation artifact) ---
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """Project Euler 72: count reduced proper fractions with denominator <= limit.

    Uses a totient sieve: ``phi[i]`` starts at ``i - 1``; for each prime ``i``
    every multiple ``j`` is scaled by ``(1 - 1/i)`` via ``phi[j] -= phi[j] // i``.
    The answer is ``sum(phi(d) for d in 2..limit)``.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: phi[i] was never reduced
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
# --- (removed dataset-concatenation artifact) ---
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCamelCase__ = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """Dataclass ``field`` helper for list defaults.

    Mutable defaults are forbidden on dataclass fields, so the list is wrapped
    in a ``default_factory`` closure instead.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    """Fixture: four required fields of the basic scalar types."""

    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """Fixture: fields with plain and ``field(...)`` defaults plus help metadata."""

    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    """Fixture: boolean fields with True/False/None defaults."""

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """Fixture: a simple two-member string enum."""

    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum):
    """Fixture: an enum mixing string and int member values."""

    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    """Fixture: a field coerced to ``BasicEnum`` in ``__post_init__``."""

    foo: BasicEnum = "toto"

    def __post_init__(self):
        # Accept the raw value ("toto") and normalize it to the enum member.
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """Fixture: a field coerced to ``MixedTypeEnum`` in ``__post_init__``."""

    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        # Accept the raw value and normalize it to the enum member.
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """Fixture: every field optional, including optional list fields."""

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """Fixture: list-typed fields with empty and non-empty defaults."""

    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """Fixture: required (no-default) fields, one coerced to ``BasicEnum``."""

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        # Normalize the raw enum value to the enum member.
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """Fixture: annotations given as string literals (forward references)."""

    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
    # PEP 604 (`X | None`) variants — only definable on Python 3.10+, hence
    # the runtime guard: the annotations are evaluated when the class is built.

    @dataclass
    class WithDefaultBoolExamplePep604:
        """PEP 604 twin of ``WithDefaultBoolExample``."""

        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        """PEP 604 twin of ``OptionalExample``."""

        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class _lowerCAmelCase ( unittest.TestCase ):
    '''Unit tests for ``HfArgumentParser``.

    Each test builds a parser from a dataclass and compares it against a
    hand-built ``argparse.ArgumentParser``, then exercises parsing from argv,
    dicts, JSON files and YAML files.

    NOTE(review): this file was machine-renamed. The dataclasses the tests
    reference (BasicExample, WithDefaultBoolExample, OptionalExample, ...) no
    longer match the mangled definitions above, and the comparison helper's
    parameters were both renamed to ``UpperCamelCase_`` (a duplicate-argument
    SyntaxError) while its body still reads ``a``/``b``. Confirm against the
    original ``tests/utils/test_hf_argparser.py`` before relying on this class.
    '''


    def __lowercase ( self : List[Any] , UpperCamelCase_ : argparse.ArgumentParser , UpperCamelCase_ : argparse.ArgumentParser ) -> str:
        '''Assert that two argparse parsers define equivalent actions.'''
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            _lowercase : Tuple = {k: v for k, v in vars(UpperCamelCase_ ).items() if k != '''container'''}
            _lowercase : str = {k: v for k, v in vars(UpperCamelCase_ ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , UpperCamelCase_ ) and yy.get('''choices''' , UpperCamelCase_ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](UpperCamelCase_ ) , yy['''type'''](UpperCamelCase_ ) )
                del xx["type"], yy["type"]
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Dict ) -> Dict:
        '''Basic required int/float/str fields plus an optional bool flag.'''
        _lowercase : Any = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Any = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCamelCase_ , required=UpperCamelCase_ )
        expected.add_argument('''--bar''' , type=UpperCamelCase_ , required=UpperCamelCase_ )
        expected.add_argument('''--baz''' , type=UpperCamelCase_ , required=UpperCamelCase_ )
        expected.add_argument('''--flag''' , type=UpperCamelCase_ , default=UpperCamelCase_ , const=UpperCamelCase_ , nargs='''?''' )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )
        _lowercase : str = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        ((_lowercase) , ) : Tuple = parser.parse_args_into_dataclasses(UpperCamelCase_ , look_for_args_file=UpperCamelCase_ )
        self.assertFalse(example.flag )

    def __lowercase ( self : Optional[int] ) -> List[Any]:
        '''Fields with defaults become optional arguments with those defaults.'''
        _lowercase : Dict = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Dict = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=UpperCamelCase_ )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCamelCase_ , help='''help message''' )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Tuple ) -> List[str]:
        '''Boolean fields: implicit flags, plus a generated --no_* counterpart.'''
        _lowercase : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCamelCase_ , default=UpperCamelCase_ , const=UpperCamelCase_ , nargs='''?''' )
        expected.add_argument('''--baz''' , type=UpperCamelCase_ , default=UpperCamelCase_ , const=UpperCamelCase_ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=UpperCamelCase_ , dest='''baz''' )
        expected.add_argument('''--opt''' , type=UpperCamelCase_ , default=UpperCamelCase_ )
        _lowercase : List[Any] = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCamelCase_ )
        for dataclass_type in dataclass_types:
            _lowercase : Dict = HfArgumentParser(UpperCamelCase_ )
            self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : Union[str, Any] = parser.parse_args([] )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=UpperCamelCase_ , baz=UpperCamelCase_ , opt=UpperCamelCase_ ) )
            _lowercase : Any = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=UpperCamelCase_ , baz=UpperCamelCase_ , opt=UpperCamelCase_ ) )
            _lowercase : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=UpperCamelCase_ , baz=UpperCamelCase_ , opt=UpperCamelCase_ ) )
            _lowercase : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=UpperCamelCase_ , baz=UpperCamelCase_ , opt=UpperCamelCase_ ) )
            _lowercase : int = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=UpperCamelCase_ , baz=UpperCamelCase_ , opt=UpperCamelCase_ ) )

    def __lowercase ( self : Any ) -> Dict:
        '''Enum field with mixed-type members: choices parse back to members.'''
        _lowercase : Optional[int] = HfArgumentParser(UpperCamelCase_ )
        _lowercase : int = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )
        _lowercase : Union[str, Any] = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        _lowercase : Union[str, Any] = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        _lowercase : Optional[int] = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        _lowercase : int = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        _lowercase : Dict = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        _lowercase : Optional[int] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def __lowercase ( self : Union[str, Any] ) -> Tuple:
        '''A ``Literal``-style field behaves like an enum: fixed choice set.'''
        @dataclass
        class _lowerCAmelCase :
            '''Local fixture: single field with a fixed set of allowed values.'''

            snake_case_ = "toto"

        _lowercase : str = HfArgumentParser(UpperCamelCase_ )
        _lowercase : str = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )
        _lowercase : Dict = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        _lowercase : int = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        _lowercase : Optional[Any] = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )

    def __lowercase ( self : Any ) -> str:
        '''List fields parse with ``nargs="+"`` and keep their defaults.'''
        _lowercase : int = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Any = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=UpperCamelCase_ )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=UpperCamelCase_ )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCamelCase_ )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=UpperCamelCase_ )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )
        _lowercase : Optional[Any] = parser.parse_args([] )
        self.assertEqual(
            UpperCamelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        _lowercase : List[str] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(UpperCamelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )

    def __lowercase ( self : int ) -> Optional[Any]:
        '''Optional fields default to None and accept explicit values.'''
        _lowercase : Optional[int] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=UpperCamelCase_ , type=UpperCamelCase_ )
        expected.add_argument('''--bar''' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='''help message''' )
        expected.add_argument('''--baz''' , default=UpperCamelCase_ , type=UpperCamelCase_ )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=UpperCamelCase_ )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=UpperCamelCase_ )
        _lowercase : Union[str, Any] = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCamelCase_ )
        for dataclass_type in dataclass_types:
            _lowercase : Tuple = HfArgumentParser(UpperCamelCase_ )
            self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : Tuple = parser.parse_args([] )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=UpperCamelCase_ , bar=UpperCamelCase_ , baz=UpperCamelCase_ , ces=[] , des=[] ) )
            _lowercase : List[str] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(UpperCamelCase_ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )

    def __lowercase ( self : str ) -> str:
        '''Required (no-default) fields produce required argparse arguments.'''
        _lowercase : List[Any] = HfArgumentParser(UpperCamelCase_ )
        _lowercase : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=UpperCamelCase_ , required=UpperCamelCase_ )
        expected.add_argument('''--required_str''' , type=UpperCamelCase_ , required=UpperCamelCase_ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCamelCase_ , )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Optional[int] ) -> Any:
        '''String-literal (forward-reference) annotations are resolved correctly.'''
        _lowercase : Dict = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Optional[int] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCamelCase_ , required=UpperCamelCase_ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCamelCase_ , )
        expected.add_argument('''--opt''' , type=UpperCamelCase_ , default=UpperCamelCase_ )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCamelCase_ , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCamelCase_ )
        self.argparsersEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : str ) -> Dict:
        '''``parse_dict`` fills a dataclass from a plain dict.'''
        _lowercase : Dict = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Tuple = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        _lowercase : Optional[int] = parser.parse_dict(UpperCamelCase_ )[0]
        _lowercase : Optional[int] = BasicExample(**UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Optional[Any] ) -> int:
        '''``parse_dict`` rejects unknown keys unless extra keys are allowed.'''
        _lowercase : Union[str, Any] = HfArgumentParser(UpperCamelCase_ )
        _lowercase : str = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(UpperCamelCase_ , parser.parse_dict , UpperCamelCase_ , allow_extra_keys=UpperCamelCase_ )

    def __lowercase ( self : Any ) -> Any:
        '''Parsing arguments from a JSON file on disk.'''
        _lowercase : str = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Optional[Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowercase : Tuple = os.path.join(UpperCamelCase_ , '''temp_json''' )
            os.mkdir(UpperCamelCase_ )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : Any = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
        _lowercase : Optional[int] = BasicExample(**UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Dict ) -> Any:
        '''Parsing arguments from a YAML file on disk.'''
        _lowercase : int = HfArgumentParser(UpperCamelCase_ )
        _lowercase : Tuple = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowercase : Any = os.path.join(UpperCamelCase_ , '''temp_yaml''' )
            os.mkdir(UpperCamelCase_ )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : int = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
        _lowercase : int = BasicExample(**UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Tuple ) -> Tuple:
        '''``HfArgumentParser`` can be built from ``TrainingArguments``.'''
        _lowercase : str = HfArgumentParser(UpperCamelCase_ )
        self.assertIsNotNone(UpperCamelCase_ )
# --- (removed dataset-concatenation artifact) ---
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-import structure for the ONNX export utilities: submodules are only
# imported when one of their names is actually accessed (via _LazyModule).
_import_structure = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,  # BUGFIX: name was garbled to OnnxSeqaSeqConfigWithPast
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# --- (removed dataset-concatenation artifact) ---
import os
def solution(filename: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum moving up, down and right only.

    Reads a comma-separated matrix from ``filename`` (relative to this file)
    and runs a column-by-column dynamic program:
      1. seed each column with "came from the left",
      2. relax downward (allow moving down within the column),
      3. relax upward (allow moving up within the column).
    Returns the cheapest path sum from any left-column cell to any
    right-column cell.
    """
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(F'''{solution() = }''')
# --- (removed dataset-concatenation artifact) ---
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Builds a tiny Flaubert config plus random inputs and runs shape checks
    for each TF Flaubert head model. Method names restored to the ones the
    test-case class below actually calls."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """Return (config, *inputs) with random tensors sized by the fields above."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        # Also exercise the positional list-of-tensors calling convention.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        # Tile each input to (batch, num_choices, seq) as the head expects.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for ModelTesterMixin: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the TF Flaubert models."""

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the published small Flaubert checkpoint."""

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 602 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for the published ViViT models.
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration for the ViViT (Video Vision Transformer) model.

    The original obfuscated ``__init__`` declared every parameter as the same
    name and assigned values to a throwaway local instead of ``self``; the
    parameter names below are restored from the assignment order.
    """

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 145 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# (old prefix, new prefix) pairs applied in order to every checkpoint key.
# This name is referenced as the default of `get_new_dict` below.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original VisualBERT checkpoints this script can convert;
# checked by `convert_visual_bert_checkpoint` before doing any work.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a torch-serialized state dict from ``checkpoint_path`` onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename the original VisualBERT keys in ``d`` to the transformers scheme.

    Detector weights are dropped, ``position_ids`` is regenerated from the
    config, and the tied decoder bias is filled in from ``cls.predictions.bias``.
    """
    new_d = OrderedDict()
    # NOTE(review): key restored from the upstream conversion script — the
    # obfuscated source lost it; confirm against the VisualBERT embeddings.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint to a transformers model dir.

    The model type and config are inferred from the checkpoint file name,
    the state dict is renamed via ``get_new_dict`` and the resulting model is
    saved with ``save_pretrained``.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: convert one checkpoint and write the result.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 145 | 1 |
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, seeded by x0 and x1.

    Iterates until two successive estimates differ by less than 1e-5.
    Raises ZeroDivisionError when the secant slope degenerates (equal points
    or equal function values), which would divide by zero.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant step: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope of the secant line.
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        # Converged when successive estimates are closer than 1e-5.
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    """Demo polynomial x**3 - 2x - 5 (real root near 2.0945514)."""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
    # Print the root of f found from the initial guesses 3 and 3.5.
    print(intersection(f, 3, 3.5))
| 465 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
# Module logger; the pipeline classes below emit their warnings through it.
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    """Output format selector for the text2text pipelines below."""

    # Raw generated token-id tensors.
    TENSORS = 0
    # Decoded text strings.
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation with a seq2seq model."""

    # Used to name output keys: f"{return_name}_text" / f"{return_name}_token_ids".
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Hook for subclasses to validate length constraints; base accepts all."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"""
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # Unwrap singleton results when the caller passed a list of strings.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Reshape to (batch, num_return_sequences, seq_len).
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """Summarization pipeline: text2text generation with summary-specific
    length sanity checks."""

    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn about length settings that are usually wrong for summarization."""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """Translation pipeline: text2text generation with src/tgt language
    handling and translation-specific length warnings."""

    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        # Prefer the tokenizer's dedicated translation-input builder when present.
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 465 | 1 |
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover of ``graph`` via maximal matching.

    Repeatedly picks an arbitrary remaining edge, adds both endpoints to the
    cover, and discards every edge touching either endpoint (2-approximation).
    """
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return the set of directed edge tuples (from_node, to_node) of ``graph``."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    # Run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds a tiny LED config/input set and checks decoder past-key-values
    behaviour for the TF LED models."""

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with EOS-terminated encoder inputs and
        a global-attention mask on the final token."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Outputs with cached past must match outputs recomputed from scratch."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Fill in any missing mask / head-mask inputs for an LED model call.

    BUG FIX: the mangled original named every parameter ``lowercase_``
    (duplicate arguments — a SyntaxError) and was itself named ``lowercase__``,
    while every call site in this file invokes ``prepare_led_inputs_dict``.
    ``tf.inta`` is restored to ``tf.int8``.
    """
    if attention_mask is None:
        # Attend to everything that is not a padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position (start token) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): identifiers in this class were machine-mangled.  The two
    # `_UpperCamelCase` bases are undefined in this file (presumably
    # TFModelTesterMixin / PipelineTesterMixin — confirm against the original
    # LED test module), every class attribute below is assigned to the single
    # name `SCREAMING_SNAKE_CASE__` (each assignment overwrites the previous
    # one), and locals assigned to `_UpperCamelCase` are later read under their
    # original names (`inputs_dict`, `config`, `model`, ...), which fails.
    SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE__ :List[str] = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ :Tuple = True
    SCREAMING_SNAKE_CASE__ :str = False
    SCREAMING_SNAKE_CASE__ :Optional[Any] = False
    SCREAMING_SNAKE_CASE__ :int = False

    def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        # Build the shared model tester and the generic config tester.
        # NOTE(review): `config_class=__a` references an undefined name
        # (presumably LEDConfig), and the results are bound to `_UpperCamelCase`
        # instead of `self.model_tester` / `self.config_tester`.
        _UpperCamelCase : int = TFLEDModelTester(self )
        _UpperCamelCase : Any = ConfigTester(self , config_class=__a )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        # Run the generic configuration round-trip tests.
        self.config_tester.run_common_tests()

    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        # Delegate the past-key-values consistency check to the model tester.
        _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__a )

    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        # Exercise attention outputs (encoder, decoder, and LED's global
        # attention) and verify their shapes and counts.
        _UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
        _UpperCamelCase : Union[str, Any] = 2
        _UpperCamelCase : str = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        _UpperCamelCase : Dict = True
        _UpperCamelCase : str = self.model_tester.seq_length
        _UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(__a : Optional[int] ):
            # Decoder attentions: one per layer, (heads, seq, seq).
            _UpperCamelCase : Optional[int] = outputs.decoder_attentions
            self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(__a : Optional[Any] ):
            # Encoder local + global attentions: one per layer each.
            _UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
            _UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            _UpperCamelCase : Dict = True
            _UpperCamelCase : Optional[Any] = False
            _UpperCamelCase : int = False
            _UpperCamelCase : Optional[int] = model_class(__a )
            _UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
            _UpperCamelCase : Any = len(__a )
            self.assertEqual(config.output_hidden_states , __a )
            check_encoder_attentions_output(__a )

            if self.is_encoder_decoder:
                _UpperCamelCase : Optional[Any] = model_class(__a )
                _UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
                self.assertEqual(config.output_hidden_states , __a )
                check_decoder_attentions_output(__a )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _UpperCamelCase : int = True
            _UpperCamelCase : Tuple = model_class(__a )
            _UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
            self.assertEqual(config.output_hidden_states , __a )
            check_encoder_attentions_output(__a )

            # Check attention is always last and order is fine
            _UpperCamelCase : Any = True
            _UpperCamelCase : List[str] = True
            _UpperCamelCase : Tuple = model_class(__a )
            _UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
            self.assertEqual(model.config.output_hidden_states , __a )
            check_encoder_attentions_output(__a )

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        pass

    def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """Build a constant int32 token-id tensor (TF analogue of torch.LongTensor).

    BUG FIX: the mangled original returned ``tf.constant(..., dtype=tf.intaa)``
    — ``tf.intaa`` does not exist (it is a mangled ``tf.int32``) — and was
    named ``lowercase__`` while the integration tests call ``_long_tensor``.
    """
    return tf.constant(tok_lst, dtype=tf.int32)


# Numeric tolerance used by the slow integration tests below.
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): locals in these tests are assigned to `_UpperCamelCase`
    # but later read under original names (`model`, `expected_shape`, ...),
    # and `__a` is undefined — artefacts of machine mangling.  Both tests
    # download "allenai/led-base-16384" and compare a 3x3 output slice
    # against hard-coded reference values.

    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
        # Base-model forward pass: check last hidden state shape and values.
        _UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        _UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        _UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        _UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
        _UpperCamelCase : Optional[int] = model(**__a )[0]
        _UpperCamelCase : Optional[int] = (1, 1024, 768)
        self.assertEqual(output.shape , __a )
        # change to expected output here
        _UpperCamelCase : Tuple = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )

    def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        # LM-head forward pass: check logits shape (vocab-sized) and values.
        _UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        _UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        _UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
        _UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
        _UpperCamelCase : Union[str, Any] = model(**__a )[0]
        _UpperCamelCase : int = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , __a )
        # change to expected output here
        _UpperCamelCase : Optional[int] = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = data
lowerCAmelCase__ :Node | None = None
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
lowerCAmelCase__ :str = None
lowerCAmelCase__ :Optional[int] = None
def __iter__( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.head
while self.head:
yield node.data
lowerCAmelCase__ :Tuple = node.next
if node == self.head:
break
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join(str(__UpperCAmelCase ) for item in iter(self ) )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
self.insert_nth(len(self ) , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
self.insert_nth(0 , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ :Optional[Any] = Node(__UpperCAmelCase )
if self.head is None:
lowerCAmelCase__ :int = new_node # first node points itself
lowerCAmelCase__ :int = new_node
elif index == 0: # insert at head
lowerCAmelCase__ :str = self.head
lowerCAmelCase__ :Any = new_node
else:
lowerCAmelCase__ :Union[str, Any] = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ :Optional[int] = temp.next
lowerCAmelCase__ :List[str] = temp.next
lowerCAmelCase__ :Optional[Any] = new_node
if index == len(self ) - 1: # insert at tail
lowerCAmelCase__ :str = new_node
def snake_case ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case ( self ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case ( self , __UpperCAmelCase = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ :List[str] = self.head
if self.head == self.tail: # just one node
lowerCAmelCase__ :Union[str, Any] = None
elif index == 0: # delete head node
lowerCAmelCase__ :List[str] = self.tail.next.next
lowerCAmelCase__ :Tuple = self.head.next
else:
lowerCAmelCase__ :Dict = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ :List[str] = temp.next
lowerCAmelCase__ :Optional[int] = temp.next
lowerCAmelCase__ :int = temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCAmelCase__ :Optional[Any] = temp
return delete_node.data
def snake_case ( self ):
'''simple docstring'''
return len(self ) == 0
def __A() -> None:
    """Exercise CircularLinkedList end to end (insert/delete/repr/errors).

    BUG FIX: the mangled original bound the list to a dead local name and then
    asserted against the undefined names ``_SCREAMING_SNAKE_CASE`` and
    ``circular_linked_list`` — every line raised NameError.  Reconstructed.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): identifiers here were machine-mangled.  Locals are bound
    # to `_lowerCAmelCase__`-style names while later lines read the original
    # names (`processor`, `image_inputs`, `inputs`, ...), and setUp assigns a
    # dead local instead of `self.tmpdirname` — confirm against the original
    # InstructBlipProcessor test before relying on this code.

    def snake_case ( self ):
        # Create a temp dir with a saved processor (image processor + two
        # tokenizers) used by all the tests below.
        '''simple docstring'''

        lowerCAmelCase__ :Any = tempfile.mkdtemp()

        lowerCAmelCase__ :List[Any] = BlipImageProcessor()
        lowerCAmelCase__ :Union[str, Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        lowerCAmelCase__ :Union[str, Any] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )

        lowerCAmelCase__ :List[str] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        processor.save_pretrained(self.tmpdirname )

    def snake_case ( self , **__UpperCAmelCase ):
        # Reload just the main tokenizer from the saved processor.
        '''simple docstring'''

        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer

    def snake_case ( self , **__UpperCAmelCase ):
        # Reload just the image processor from the saved processor.
        '''simple docstring'''

        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor

    def snake_case ( self , **__UpperCAmelCase ):
        # Reload just the Q-Former tokenizer from the saved processor.
        '''simple docstring'''

        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer

    def snake_case ( self ):
        # Remove the temp dir created in setUp.
        '''simple docstring'''

        shutil.rmtree(self.tmpdirname )

    def snake_case ( self ):
        # Produce a single random PIL image used as pixel input.
        '''simple docstring'''

        lowerCAmelCase__ :Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]

        lowerCAmelCase__ :Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def snake_case ( self ):
        # Save then reload with overridden kwargs; the reloaded components
        # must reflect the new special tokens / normalization settings.
        '''simple docstring'''

        lowerCAmelCase__ :List[str] = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )

        lowerCAmelCase__ :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        lowerCAmelCase__ :str = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )

        lowerCAmelCase__ :Dict = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__UpperCAmelCase , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
        self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )

    def snake_case ( self ):
        # Processor image path must match running the image processor directly.
        '''simple docstring'''

        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :List[Any] = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()

        lowerCAmelCase__ :str = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )

        lowerCAmelCase__ :Dict = self.prepare_image_inputs()

        lowerCAmelCase__ :List[str] = image_processor(__UpperCAmelCase , return_tensors='np' )
        lowerCAmelCase__ :Optional[Any] = processor(images=__UpperCAmelCase , return_tensors='np' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case ( self ):
        # Processor text path must match both tokenizers run directly
        # (Q-Former outputs are prefixed with "qformer_").
        '''simple docstring'''

        lowerCAmelCase__ :Union[str, Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()

        lowerCAmelCase__ :Any = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )

        lowerCAmelCase__ :Optional[int] = 'lower newer'

        lowerCAmelCase__ :Dict = processor(text=__UpperCAmelCase )

        lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )

    def snake_case ( self ):
        # Combined text+image call must yield all five expected keys,
        # and an empty call must raise.
        '''simple docstring'''

        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :str = self.get_qformer_tokenizer()

        lowerCAmelCase__ :Dict = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )

        lowerCAmelCase__ :Union[str, Any] = 'lower newer'
        lowerCAmelCase__ :Dict = self.prepare_image_inputs()

        lowerCAmelCase__ :Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )

        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )

        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()

    def snake_case ( self ):
        # batch_decode must defer to the main tokenizer's batch_decode.
        '''simple docstring'''

        lowerCAmelCase__ :Tuple = self.get_image_processor()
        lowerCAmelCase__ :Dict = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()

        lowerCAmelCase__ :int = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )

        lowerCAmelCase__ :List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        lowerCAmelCase__ :Tuple = processor.batch_decode(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = tokenizer.batch_decode(__UpperCAmelCase )

        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        # Model input names exposed by the processor for text+image input.
        '''simple docstring'''

        lowerCAmelCase__ :Tuple = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :Dict = self.get_qformer_tokenizer()

        lowerCAmelCase__ :Optional[Any] = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )

        lowerCAmelCase__ :Optional[int] = 'lower newer'
        lowerCAmelCase__ :Optional[int] = self.prepare_image_inputs()

        lowerCAmelCase__ :int = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )

        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# BUG FIX: the mangled original bound both the logger and the archive map to
# the single name `_UpperCAmelCase`, losing the logger.  Restored names.
logger = logging.get_logger(__name__)

# Canonical config locations for the published T5 checkpoints.
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class __magic_name__(PretrainedConfig):
    """Configuration for T5 models (mangled class name kept for compatibility).

    BUG FIX: the mangled original did not even parse — every ``__init__``
    parameter was named ``snake_case_`` (duplicate arguments), the base class
    ``__SCREAMING_SNAKE_CASE`` was undefined (restored to the imported
    ``PretrainedConfig``), all three class attributes shared one name, locals
    shadowed ``self`` assignments, and ``_a`` was undefined.  Reconstructed
    with the canonical T5Config parameter names and defaults.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Default to a symmetric encoder/decoder when not given explicitly.
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" -> gated act with "gelu"; "relu" -> plain act.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class __magic_name__(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for T5 (mangled class name kept).

    BUG FIX: the mangled original named both properties ``_A`` (the second
    overwrote the first), left the base ``__SCREAMING_SNAKE_CASE`` undefined
    (restored to the imported ``OnnxSeqaSeqConfigWithPast``), and referenced
    the undefined ``_a``.  Property names restored to the ones the ONNX
    export machinery looks up.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe dynamic axes of the exported model's inputs."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With a cache, the decoder consumes one new token per step.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting all T5 operators."""
        return 13
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# BUG FIX: the mangled original assigned every object (import structure, each
# optional symbol list, and the final _LazyModule) to the single name
# `_UpperCAmelCase`, so the structure was repeatedly overwritten and the final
# `_LazyModule(...)` call referenced an undefined `_import_structure`.
# Restored to the standard transformers lazy-module layout.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

# Fast tokenizer is only exposed when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module below
    # resolves these on first attribute access.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# BUG FIX: the mangled original bound both the logger and the archive map to
# the single name `lowerCamelCase_`, losing the logger.  Restored names.
logger = logging.get_logger(__name__)

# Canonical config locations for the published RoBERTa checkpoints.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for RoBERTa models (mangled class name kept).

    BUG FIX: the mangled original did not parse — every ``__init__`` parameter
    was named ``SCREAMING_SNAKE_CASE__`` (duplicate arguments) — its base
    class ``A`` was undefined (restored to the imported ``PretrainedConfig``),
    and all ``self.attr = ...`` assignments had been turned into dead locals.
    Reconstructed with the canonical RobertaConfig parameters and defaults.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _SCREAMING_SNAKE_CASE(OnnxConfig):
    """ONNX export configuration for RoBERTa (mangled class name kept).

    BUG FIX: the mangled original used the undefined base ``A`` (restored to
    the imported ``OnnxConfig``), named the property ``_UpperCamelCase``
    instead of ``inputs`` (which the ONNX export machinery looks up), and
    bound the axis dict to a dead local name.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe dynamic axes of the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
import argparse
import os
import re
lowerCamelCase_ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase_ = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowerCamelCase_ = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite: bool = False):
    """Sort the OrderedDict auto-mapping entries of one file by identifier.

    Returns True when the file needed sorting and ``overwrite`` is False;
    rewrites the file in place when ``overwrite`` is True.

    BUG FIX: the mangled original bound every local to ``__SCREAMING_SNAKE_CASE``
    while later lines read the real names (``content``, ``lines``, ``blocks``,
    ...), and was named ``__lowerCamelCase`` (duplicating the next function)
    while its caller invokes ``sort_auto_mapping``.  Reconstructed.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries of the mapping are indented 8 spaces past the intro line.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Sort the auto mappings of every .py file under PATH_TO_AUTO_MODULE.

    Raises ValueError (listing offending files) when ``overwrite`` is False
    and at least one file is unsorted.

    BUG FIX: the mangled original listed ``os.listdir(a_)`` on its *bool*
    parameter instead of the auto-module path, read undefined names
    (``fnames``, ``diffs``), and was named ``__lowerCamelCase`` while the
    ``__main__`` block calls ``sort_all_auto_mappings``.  Reconstructed.
    """
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f'The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'
            " this."
        )
if __name__ == "__main__":
    # BUG FIX: the mangled original assigned the parser and the parsed args to
    # `lowerCamelCase_` but then read the undefined names `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    # --check_only reports problems; otherwise files are rewritten in place.
    sort_all_auto_mappings(not args.check_only)
'''simple docstring'''
import math
class UpperCamelCase__:
    """Self-organizing map with exactly two competing weight vectors.

    BUG FIX: the mangled original did not parse (both methods declared two
    parameters named ``__A``), accumulated into the undefined name ``da``,
    and named both methods ``snake_case`` while the training loop calls
    ``get_winner`` / ``update``.  The winner was also decided inside the
    loop after the *first* feature (with a dead trailing ``return 0``);
    the distance is now accumulated over all features before deciding.
    """

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the winning (competing) vector for *sample*.

        NOTE(review): the comparison direction (`0 if d0 > d1 else 1`) is
        preserved from the upstream algorithm — confirm the intended
        winner semantics before changing it.
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move winning vector *j* toward *sample* by learning rate *alpha*.

        NOTE(review): upstream iterates range(len(weights)) (number of
        clusters), not range(len(sample)); preserved as-is — confirm intent.
        """
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def A__() -> None:
    """Train the SOM on four hand-written samples, then classify a test sample.

    BUG FIX: the mangled original referenced the undefined names ``A_``,
    ``SelfOrganizingMap`` (the class in this file is ``UpperCamelCase__``),
    ``training_samples``, ``weights`` and ``self_organizing_map``.
    """
    # Training samples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = UpperCamelCase__()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
    # BUG FIX: the module's entry point is named `A__` (mangled from `main`);
    # the original guard called an undefined `main()`.
    A__()
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): identifiers here were machine-mangled.  The base class
    # `lowerCamelCase__` is not defined in this file (it looks like
    # TokenizerTesterMixin — confirm), both class attributes share the name
    # `UpperCAmelCase__` (the second overwrites the first), and locals bound
    # to `_lowercase` are later read under their original names
    # (`self.vocab_file`, `tokenizer`, `tokens`, ...), which fails.
    UpperCAmelCase__ = BioGptTokenizer
    UpperCAmelCase__ = False

    def snake_case ( self : Dict ):
        # Write a tiny BPE vocab + merges pair into the temp dir so the
        # tokenizer can be constructed without network access.
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _lowercase = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        _lowercase = dict(zip(__A , range(len(__A ) ) ) )
        _lowercase = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        _lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(__A ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(__A ) )

    def snake_case ( self : Optional[Any] , __A : List[Any] ):
        # Provide an (input, expected-output) text pair for round-trip tests.
        """simple docstring"""
        _lowercase = "lower newer"
        _lowercase = "lower newer"
        return input_text, output_text

    def snake_case ( self : List[Any] ):
        # BPE tokenization of "lower" should split into "low" + "er</w>",
        # and unknown tokens should map to the <unk> id.
        """simple docstring"""
        _lowercase = BioGptTokenizer(self.vocab_file , self.merges_file )

        _lowercase = "lower"
        _lowercase = ["low", "er</w>"]
        _lowercase = tokenizer.tokenize(__A )
        self.assertListEqual(__A , __A )

        _lowercase = tokens + ["<unk>"]
        _lowercase = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )

    @slow
    def snake_case ( self : Optional[int] ):
        # Special-token handling with the published microsoft/biogpt
        # checkpoint: single sequence gets [2] prefix, pairs get [2] between.
        """simple docstring"""
        _lowercase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )

        _lowercase = tokenizer.encode("sequence builders" , add_special_tokens=__A )
        _lowercase = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )

        _lowercase = tokenizer.build_inputs_with_special_tokens(__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A , __A )

        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 602 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
# Module-level logger for the PABEE modeling code.
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """``BertEncoder`` variant that can run a *single* layer at a time, as needed
    for PABEE's layer-by-layer early-exit inference."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only ``self.layer[current_layer]`` and return its hidden states."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BERT backbone with Patience-based Early Exit (PABEE).

    During training, every layer's pooled output is classified so all exits are
    supervised. During inference, if ``patience > 0``, decoding stops as soon as
    ``patience`` consecutive layers agree on the prediction.
    """

    def __init__(self, config):
        super().__init__(config)

        # Encoder that exposes per-layer `adaptive_forward`.
        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        """Set the max |Δprediction| between layers counted as "agreement" for regression."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing layers trigger an early exit (0 = disabled)."""
        self.patience = patience

    def reset_stats(self):
        """Reset the running inference statistics used by `log_stats`."""
        self.inference_layers_num = 0
        self.inference_instances_num = 0

    def log_stats(self):
        """Print the average number of layers used per instance and the implied speed-up."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Return a list of per-exit logits (one element per layer actually run).

        `output_layers` is the list of per-layer classification heads and
        `output_dropout` the dropout applied before them during training;
        `regression=True` switches the agreement test to a threshold on values.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Supervise every exit: collect logits from each layer's pooled output.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive layers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    # Agreement = current and previous predictions within the threshold.
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    # Agreement = current and previous argmax predictions identical.
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """PABEE BERT with one classification head per layer (one possible exit each)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One linear head per hidden layer so every exit can classify.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss?, last_exit_logits)``.

        With ``labels``, the loss is a weighted average over all exits, where
        deeper exits get linearly larger weights (ix + 1).
        """
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question-retrieval encoder and the seq2seq answer generator.

    Returns ``(qar_tokenizer, qar_model, sas_tokenizer, sas_model)``; the
    retriever pair is ``(None, None)`` when ``LOAD_DENSE_INDEX`` is off.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Build the dense FAISS index over Wiki40b passages (optional) and the Elasticsearch client."""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question embeddings."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Load everything once at app start-up (results are cached by streamlit).
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the ``n_results`` ELI5 training examples most similar to ``question``."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve support passages for ``question`` and format the seq2seq input.

    Returns ``(question_doc, support_list)``, where ``support_list`` holds
    ``(article_title, section_title, score, passage_text)`` tuples.
    """
    if source == "none":
        # No retrieval: feed the generator an empty context of the same shape.
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for the retrieval-augmented ``question_doc``."""
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE(review): `support_list` is resolved from module globals set by the UI
    # code below (make_support runs before answer_question) — confirm intended.
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example's ``content`` and record its chars-per-token ratio.

    Uses the module-level ``tokenizer`` created after argument parsing below.
    """
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Tokenize in parallel, dropping every column except the new token ids/ratio.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn a flat ``["--key", "value", ...]`` list (from ``parse_known_args``)
    into a ``{key: value}`` dict, stripping leading dashes from the keys."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """Entry point for the ``datasets-cli`` tool: register sub-commands, parse, dispatch."""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        # No sub-command selected: show usage and exit with an error code.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Number of synthetic examples used for the speed test.
SPEED_TEST_N_EXAMPLES = 500_000

# Timing results are written next to this script, under results/<script>.json.
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset, **kwargs):  # deliberately shadows the builtin: benchmark helper name
    """Time ``dataset.map(**kwargs)``; ``get_duration`` makes this return the elapsed seconds."""
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset, **kwargs):  # deliberately shadows the builtin: benchmark helper name
    """Time ``dataset.filter(**kwargs)``; ``get_duration`` makes this return the elapsed seconds."""
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    """Benchmark ``Dataset.map``/``Dataset.filter`` across output formats and dump timings to JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Canonical config URLs for the released Deformable DETR checkpoints.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case(PretrainedConfig):
    """Configuration class for Deformable DETR models.

    Stores all architecture hyper-parameters; remaining keyword arguments are
    forwarded to :class:`PretrainedConfig`. The original signature used one
    duplicated parameter name (a SyntaxError) and assigned every value to a
    single throwaway local instead of instance attributes; both are restored.
    """

    # Required by the PretrainedConfig (de)serialization machinery.
    model_type = "deformable_detr"
    # Maps generic config attribute names onto this model's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the nested backbone config object from its dict form.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Generic alias used by the library for the encoder head count."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Generic alias used by the library for the model width."""
        return self.d_model

    def to_dict(self):
        """Serialize the config (including the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 392 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Import structure consumed by `_LazyModule`: submodule name -> exported names.
# (Previously bound to a throwaway variable, leaving `_import_structure` —
# referenced at module bottom — undefined.)
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

# Register the torch model entries only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint to load.
        config_file: JSON file describing the T5 architecture.
        pytorch_dump_path: Directory where the converted model is saved.
    """
    # Build an un-initialized model from the JSON configuration.
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point: parse the three required paths and convert.
    # (The parser/args results were previously bound to throwaway names,
    # leaving `parser` and `args` undefined.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 353 | 1 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams of each other.

    The comparison is case-insensitive and ignores all spaces.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # Count characters of the first string up and of the second string down;
    # every total is zero exactly when the strings are anagrams.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    # Run the doctests first, then the interactive check.
    from doctest import testmod

    testmod()
    # (Previously the inputs and result were bound to throwaway names, leaving
    # `input_a`, `input_b` and `status` undefined below.)
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 143 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure consumed by `_LazyModule`: submodule name -> exported names.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

# Register the torch model entry only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for PhoBERT's BPE tokenizer.

    The four methods previously shared one mangled name (so each definition
    shadowed the last) and the mixin base was undefined; canonical names are
    restored so the test harness can discover them.
    """

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        # Write the tiny fixture vocab/merges files used by all tests.
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 189 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
# All seven module constants were previously bound to the same name `_A`
# (each shadowing the previous) while later code references them by their
# canonical names; those names are restored here.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Conv -> BatchNorm -> activation block used throughout ResNet."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        # `kernel_size // 2` padding keeps the spatial size for odd kernels ("same" padding).
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `activation=None` yields an identity (used for the last conv of a residual branch).
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: strided 7x7 convolution followed by max pooling."""

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        # Validate the channel dimension early to give a clear error message.
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 projection shortcut matching channels/resolution of the residual branch."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic residual block: two 3x3 convolutions plus an optional projection shortcut."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # A projection is needed whenever the residual and branch shapes differ.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        # The second conv carries no activation; it is applied after the residual add.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut."""

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # The inner 3x3 conv runs on `out_channels // reduction` channels.
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """One ResNet stage: `depth` layers of the configured block type."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """Stack of ResNet stages; can optionally collect every intermediate hidden state."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        # Hidden states are collected *before* each stage, plus the final output.
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and pretrained checkpoint loading for ResNet models."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions, constant init for normalization layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): toggles checkpointing on the encoder; the original
        # isinstance target was mangled — ResNetEncoder matches the file's
        # structure, confirm against upstream.
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
# Shared docstring fragments injected into the model classes below.
# (Both were previously bound to the same name `_A`, shadowing each other,
# while the decorators reference them by these canonical names.)
RESNET_START_DOCSTRING = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

RESNET_INPUTS_DOCSTRING = r'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    """ResNet backbone plus average pooling, with no task head."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        # Fall back to the config-level flags when the caller does not override them.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet with a linear classification head over the pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type on first use, following the library convention.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """Exposes ResNet's intermediate feature maps to detection/segmentation frameworks."""

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        """
        Returns:
            The feature maps of the stages requested via `config.out_features`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        # Always collect all hidden states internally so out_features can index them.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 189 | 1 |
'''simple docstring'''
import requests
# Constants referenced by the request helpers below; previously bound to
# throwaway names, leaving `APPID` and `URL_BASE` undefined.
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions for location *q*.

    The parameter names matter: `locals()` becomes the HTTP query string,
    so they must match the OpenWeatherMap API (`q`, `appid`).
    """
    return requests.get(URL_BASE + '''weather''', params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """5-day forecast for location *q*; `locals()` forms the query parameters."""
    return requests.get(URL_BASE + '''forecast''', params=locals()).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call weather data for the given coordinates (`lat`/`lon` become query params)."""
    return requests.get(URL_BASE + '''onecall''', params=locals()).json()
if __name__ == "__main__":
    # Interactive loop: pretty-print current weather until an empty input is given.
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 460 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Pairwise cosine similarity between two batches of row embeddings.

    Each input is L2-normalized per row (clipped by *eps* to avoid division
    by zero), then the similarity matrix is their product: shape
    (rows(emb_1), rows(emb_2)).
    """
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """CLIP-based NSFW detector: scores image embeddings against learned concept embeddings."""

    # Flax dataclass fields (the mangled version gave both the same name,
    # which is invalid and broke `self.config` / `self.dtype`).
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # `setup` is the name flax's nn.Module requires for lazy submodule creation.
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param('''concept_embeds''', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            '''special_care_embeds''', jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param('''concept_embeds_weights''', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('''special_care_embeds_weights''', jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """Pretrained-model wrapper around the safety-checker flax module."""

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Default to a single 224x224 RGB image in NHWC layout.
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        random_params = self.module.init(rngs, clip_input)['''params''']

        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Incoming images are NCHW; the flax module expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {'''params''': params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 460 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase__ ( config : "ViTMSNConfig" , base_model : bool = False ):
    """Build the (old_name, new_name) pairs mapping a ViT-MSN checkpoint onto HF ViT names.

    Fixes vs. the obfuscated original: the duplicate parameter names (a
    SyntaxError) are restored to ``config``/``base_model``, and the list is
    accumulated in a properly named local (the original appended to an
    undefined ``rename_keys``).

    Args:
        config: model config providing ``num_hidden_layers``.
        base_model: when True, emit names for a bare ViT encoder — strip the
            ``vit.`` prefix and keep the final layernorm instead of a
            classification head.

    Returns:
        list[tuple[str, str]]: rename pairs (checkpoint key, HF key).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("""module.cls_token""", """vit.embeddings.cls_token"""),
            ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
            ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
            ("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("""module.norm.weight""", """layernorm.weight"""),
                ("""module.norm.bias""", """layernorm.bias"""),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("""norm.weight""", """vit.layernorm.weight"""),
                ("""norm.bias""", """vit.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """classifier.bias"""),
            ] )
    return rename_keys
def UpperCAmelCase__ ( state_dict : dict , config : "ViTMSNConfig" , base_model : bool = False ):
    """Split each fused qkv projection in the checkpoint into separate q/k/v entries.

    The MSN checkpoint stores attention as one fused ``attn.qkv`` matrix/bias;
    HF ViT expects separate query/key/value tensors. Pops the fused tensors and
    re-inserts the slices under the HF key names (mutates ``state_dict`` in place).

    Fixes vs. the obfuscated original: duplicate parameter names (a SyntaxError)
    are restored, and the sliced tensors are written back into ``state_dict``
    instead of being discarded into a throwaway local.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ ( state_dict : dict ):
    """Drop the classification-head tensors from a checkpoint state dict (in place).

    Fixes vs. the obfuscated original: the loop popped ``state_dict.pop(d, d)``
    (using the dict itself as the key — a TypeError) instead of each key ``k``.
    """
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        # Use a default so a missing key is not an error.
        state_dict.pop(k , None )
def UpperCAmelCase__ ( state_dict : dict ):
    """Drop the MSN projection-head (``module.fc.*``) tensors from a state dict (in place).

    Fixes vs. the obfuscated original: the loop popped ``state_dict.pop(d, d)``
    (using the dict itself as the key — a TypeError) instead of each key ``k``.
    """
    ignore_keys = [
        """module.fc.fc1.weight""",
        """module.fc.fc1.bias""",
        """module.fc.bn1.weight""",
        """module.fc.bn1.bias""",
        """module.fc.bn1.running_mean""",
        """module.fc.bn1.running_var""",
        """module.fc.bn1.num_batches_tracked""",
        """module.fc.fc2.weight""",
        """module.fc.fc2.bias""",
        """module.fc.bn2.weight""",
        """module.fc.bn2.bias""",
        """module.fc.bn2.running_mean""",
        """module.fc.bn2.running_var""",
        """module.fc.bn2.num_batches_tracked""",
        """module.fc.fc3.weight""",
        """module.fc.fc3.bias""",
    ]
    for k in ignore_keys:
        # Use a default so a missing key is not an error.
        state_dict.pop(k , None )
def UpperCAmelCase__ ( dct : dict , old : str , new : str ):
    """Rename a key in ``dct`` in place: move ``dct[old]`` to ``dct[new]``.

    Fixes vs. the obfuscated original: the duplicate parameter names were a
    SyntaxError and the popped value was discarded instead of reinserted.
    """
    val = dct.pop(old )
    dct[new] = val
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ):
    """Download a ViT-MSN checkpoint, convert it to HF ViT format, sanity-check
    the outputs against precomputed values, and save model + image processor.

    NOTE(review): obfuscation collapsed all parameter/local names — the
    duplicate parameter names above are a SyntaxError, and most assignments
    below overwrite one throwaway local instead of distinct config fields
    (hidden_size, intermediate_size, num_hidden_layers, patch_size, ...).
    Compare against the upstream ``convert_vit_msn_to_pytorch.py`` before use.
    """
    lowerCAmelCase = ViTMSNConfig()
    # Classification setup: 1000 ImageNet-1k labels pulled from the hub.
    lowerCAmelCase = 10_00
    lowerCAmelCase = """datasets/huggingface/label-files"""
    lowerCAmelCase = """imagenet-1k-id2label.json"""
    lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , """r""" ) )
    lowerCAmelCase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    lowerCAmelCase = idalabel
    lowerCAmelCase = {v: k for k, v in idalabel.items()}
    # Per-variant architecture hyperparameters, keyed off the checkpoint URL.
    if "s16" in checkpoint_url:
        lowerCAmelCase = 3_84
        lowerCAmelCase = 15_36
        lowerCAmelCase = 6
    elif "l16" in checkpoint_url:
        lowerCAmelCase = 10_24
        lowerCAmelCase = 40_96
        lowerCAmelCase = 24
        lowerCAmelCase = 16
        lowerCAmelCase = 0.1
    elif "b4" in checkpoint_url:
        lowerCAmelCase = 4
    elif "l7" in checkpoint_url:
        lowerCAmelCase = 7
        lowerCAmelCase = 10_24
        lowerCAmelCase = 40_96
        lowerCAmelCase = 24
        lowerCAmelCase = 16
        lowerCAmelCase = 0.1
    lowerCAmelCase = ViTMSNModel(SCREAMING_SNAKE_CASE )
    # Only the target-encoder weights of the MSN checkpoint are converted.
    lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""target_encoder"""]
    lowerCAmelCase = ViTImageProcessor(size=config.image_size )
    remove_projection_head(SCREAMING_SNAKE_CASE )
    lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE , base_model=SCREAMING_SNAKE_CASE )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , base_model=SCREAMING_SNAKE_CASE )
    model.load_state_dict(SCREAMING_SNAKE_CASE )
    model.eval()
    # Sanity-check the converted model on a standard COCO image.
    lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
    lowerCAmelCase = ViTImageProcessor(
        size=config.image_size , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE )
    lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    lowerCAmelCase = model(**SCREAMING_SNAKE_CASE )
    lowerCAmelCase = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        lowerCAmelCase = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
    elif "b16" in checkpoint_url:
        lowerCAmelCase = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
    elif "l16" in checkpoint_url:
        lowerCAmelCase = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
    elif "b4" in checkpoint_url:
        lowerCAmelCase = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
    else:
        lowerCAmelCase = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # Command-line entry point: convert a remote ViT-MSN checkpoint to HF format.
    # Fix vs. the obfuscated original: the parser/args objects were assigned to
    # one mangled name but referenced as `parser`/`args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 393 |
"""simple docstring"""
def UpperCAmelCase__ ( weights : list , values : list , number_of_items : int , max_weight : int , index : int ):
    """Solve the 0/1 knapsack problem by naive recursion.

    Fixes vs. the obfuscated original: the five duplicate parameter names were
    a SyntaxError, and the recursive calls targeted an undefined ``knapsack``.

    Args:
        weights: item weights.
        values: item values (same length as ``weights``).
        number_of_items: total item count (recursion stop).
        max_weight: remaining knapsack capacity.
        index: index of the item currently being considered.

    Returns:
        int: the best achievable total value from ``index`` onward.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans_without = UpperCAmelCase__(weights , values , number_of_items , max_weight , index + 1 )
    # Option 2: take the current item, if it fits.
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + UpperCAmelCase__(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without , ans_with )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 393 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Standard 256x256 RGB preprocessing: resize, to-tensor, scale pixels to [-1, 1].
_lowerCamelCase : Tuple = transforms.Compose(
    [
        transforms.Resize((2_5_6, 2_5_6)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def _UpperCAmelCase (UpperCamelCase_ : int ):
    """Convert a tensor (returned as-is) or a single PIL image into a batched,
    normalized tensor.

    NOTE(review): local names were destroyed by obfuscation — the branches
    reference an undefined ``image`` (presumably the parameter), and a *list*
    input falls through both branches and returns None; confirm list handling
    against the upstream implementation.
    """
    if isinstance(UpperCamelCase_ , torch.Tensor ):
        return image
    elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
        _lowerCAmelCase : Tuple = [image]
        _lowerCAmelCase : Union[str, Any] = [trans(img.convert("""RGB""" ) ) for img in image]
        _lowerCAmelCase : Union[str, Any] = torch.stack(UpperCamelCase_ )
        return image
class __snake_case (_a ):
    """DDIM image-to-image pipeline: noise an input image up to an intermediate
    timestep (controlled by ``strength``), then denoise it back with the UNet.

    NOTE(review): parameter names were collapsed by obfuscation — the duplicate
    names in ``__init__`` are a SyntaxError as written and several bodies
    reference names (`scheduler`, `strength`, `latents`, ...) never assigned
    here; compare against the upstream diffusers pipeline before use.
    """

    def __init__( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Any:
        """Register the UNet and a DDIM-converted scheduler."""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        _lowerCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : Any ) -> Optional[Any]:
        """Validate that ``strength`` lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )

    def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Optional[int]:
        """Trim the scheduler timesteps according to ``strength`` and return
        (timesteps, effective number of inference steps)."""
        _lowerCAmelCase : Tuple = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
        _lowerCAmelCase : Any = max(num_inference_steps - init_timestep , 0 )
        _lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
        """Noise the preprocessed image to the starting timestep and return the latents."""
        if not isinstance(_UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCAmelCase )}" )
        _lowerCAmelCase : List[str] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
        _lowerCAmelCase : Tuple = init_latents.shape
        _lowerCAmelCase : int = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
        # get latents
        print("""add noise to latents at timestep""" , _UpperCAmelCase )
        _lowerCAmelCase : Optional[int] = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase : Tuple = init_latents
        return latents

    @torch.no_grad()
    def __call__( self : List[Any] , _UpperCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , _UpperCAmelCase : float = 0.8 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full image-to-image generation loop and return the images
        (PIL by default, or a (images, timestep) tuple when return_dict=False)."""
        # 1. Check inputs
        self.check_inputs(_UpperCAmelCase )
        # 2. Preprocess image
        _lowerCAmelCase : Any = preprocess(_UpperCAmelCase )
        # 3. set timesteps
        self.scheduler.set_timesteps(_UpperCAmelCase , device=self.device )
        _lowerCAmelCase , _lowerCAmelCase : str = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
        _lowerCAmelCase : Any = timesteps[:1].repeat(_UpperCAmelCase )
        # 4. Prepare latent variables
        _lowerCAmelCase : int = self.prepare_latents(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.unet.dtype , self.device , _UpperCAmelCase )
        _lowerCAmelCase : Union[str, Any] = latents
        # 5. Denoising loop
        for t in self.progress_bar(_UpperCAmelCase ):
            # 1. predict noise model_output
            _lowerCAmelCase : Optional[int] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            _lowerCAmelCase : int = self.scheduler.step(
                _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , eta=_UpperCAmelCase , use_clipped_model_output=_UpperCAmelCase , generator=_UpperCAmelCase , ).prev_sample
        # Map samples from [-1, 1] back to [0, 1] and to numpy HWC layout.
        _lowerCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        _lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_UpperCAmelCase )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=_UpperCAmelCase )
| 429 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# Flag value used when setting environment variables for the accelerate test harness.
_lowerCamelCase : Optional[Any] = "true"


def _UpperCAmelCase (UpperCamelCase_ : Any , UpperCamelCase_ : List[Any]=82 , UpperCamelCase_ : List[str]=16 ):
    """Build a seeded regression model + dataloader pair prepared by the given Accelerator.

    NOTE(review): the duplicate parameter names above are a SyntaxError as
    written, and the body references undefined names (`model`, `accelerator`,
    ...) — obfuscation collapsed an (accelerator, num_samples, batch_size)
    signature; compare against the upstream accelerate test before use.
    """
    set_seed(42 )
    _lowerCAmelCase : Optional[Any] = RegressionModel()
    _lowerCAmelCase : List[Any] = deepcopy(UpperCamelCase_ )
    _lowerCAmelCase : Tuple = RegressionDataset(length=UpperCamelCase_ )
    _lowerCAmelCase : int = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ )
    model.to(accelerator.device )
    _lowerCAmelCase , _lowerCAmelCase : Dict = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ )
    return model, ddp_model, dataloader
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : int=False ):
    """Build the tokenized GLUE/MRPC validation dataloader used by the metric tests.

    NOTE(review): the duplicate parameter names above are a SyntaxError as
    written; the body reads ``accelerator`` and ``use_longest``-style arguments.
    """
    _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
    _lowerCAmelCase : str = load_dataset("""glue""" , """mrpc""" , split="""validation""" )

    def tokenize_function(UpperCamelCase_ : Optional[int] ):
        # Tokenize the MRPC sentence pair for one batch of examples.
        _lowerCAmelCase : str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
        return outputs

    # Map on the main process first so the tokenized cache is shared.
    with accelerator.main_process_first():
        _lowerCAmelCase : str = dataset.map(
            UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    _lowerCAmelCase : int = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(UpperCamelCase_ : Optional[int] ):
        # Pad either to the longest sequence in the batch or to a fixed 128.
        if use_longest:
            return tokenizer.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""pt""" )
        return tokenizer.pad(UpperCamelCase_ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )

    return DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=16 )
def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : int ):
    """Prepare the MRPC model/dataloader both Accelerator-wrapped ("ddp") and
    plain ("no"), returning the setup dict and the accelerator.

    NOTE(review): duplicate parameter names above are a SyntaxError as written;
    the original signature is (dispatch_batches, split_batches).
    """
    _lowerCAmelCase : Optional[int] = Accelerator(dispatch_batches=UpperCamelCase_ , split_batches=UpperCamelCase_ )
    _lowerCAmelCase : Any = get_dataloader(UpperCamelCase_ , not dispatch_batches )
    _lowerCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=UpperCamelCase_ )
    _lowerCAmelCase , _lowerCAmelCase : List[str] = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCAmelCase (UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ):
    """Run the model over the dataloader and gather (logits, targets) across processes.

    NOTE(review): duplicate parameter names above are a SyntaxError as written,
    and the body references undefined names (`dataloader`, `model`,
    `accelerator`, ...) — obfuscation collapsed a (model, dataloader,
    accelerator) signature.
    """
    _lowerCAmelCase : str = []
    for batch in dataloader:
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = batch.values()
        with torch.no_grad():
            _lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
        # Gather across all processes, trimming duplicated samples.
        _lowerCAmelCase , _lowerCAmelCase : int = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    _lowerCAmelCase , _lowerCAmelCase : List[Any] = [], []
    for logit, targ in logits_and_targets:
        logits.append(UpperCamelCase_ )
        targs.append(UpperCamelCase_ )
    _lowerCAmelCase , _lowerCAmelCase : Dict = torch.cat(UpperCamelCase_ ), torch.cat(UpperCamelCase_ )
    return logits, targs
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : Any=82 , UpperCamelCase_ : str=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Dict=16 ):
    """Assert that gather_for_metrics yields exactly ``num_samples`` predictions.

    NOTE(review): duplicate parameter names above are a SyntaxError as written;
    obfuscation collapsed an (accelerator, num_samples, ...) signature.
    """
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = get_basic_setup(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    _lowerCAmelCase , _lowerCAmelCase : List[str] = generate_predictions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    assert (
        len(UpperCamelCase_ ) == num_samples
    ), F"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(UpperCamelCase_ )}"
def _UpperCAmelCase (UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False ):
    """Check that MRPC accuracy/F1 match between a plain single-process run
    and an Accelerator-distributed run.

    NOTE(review): duplicate parameter names above are a SyntaxError as written;
    the original signature is (dispatch_batches, split_batches).
    """
    _lowerCAmelCase : Optional[Any] = evaluate.load("""glue""" , """mrpc""" )
    _lowerCAmelCase , _lowerCAmelCase : Tuple = get_mrpc_setup(UpperCamelCase_ , UpperCamelCase_ )
    # First do baseline
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = setup["""no"""]
    model.to(UpperCamelCase_ )
    model.eval()
    for batch in dataloader:
        batch.to(UpperCamelCase_ )
        with torch.inference_mode():
            _lowerCAmelCase : Tuple = model(**UpperCamelCase_ )
        _lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=UpperCamelCase_ , references=batch["""labels"""] )
    _lowerCAmelCase : int = metric.compute()
    # Then do distributed
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            _lowerCAmelCase : Dict = model(**UpperCamelCase_ )
        _lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
        _lowerCAmelCase : Optional[int] = batch["""labels"""]
        _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=UpperCamelCase_ , references=UpperCamelCase_ )
    _lowerCAmelCase : Optional[Any] = metric.compute()
    # Both runs must agree on every reported metric.
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def _UpperCAmelCase ():
    """Driver: run the gather_for_metrics and torch-metric checks across all
    combinations of split_batches / dispatch_batches, resetting accelerator
    state between configurations.

    NOTE(review): several Accelerator(...) calls below pass the mangled
    ``UpperCamelCase_`` name, which is undefined at module scope — compare
    against the upstream accelerate test script before use.
    """
    _lowerCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase_ , dispatch_batches=UpperCamelCase_ )
    # Quiet the noisy libraries on non-main processes.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("""**Testing gather_for_metrics**""" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
                test_mrpc(UpperCamelCase_ , UpperCamelCase_ )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test torch metrics**""" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            _lowerCAmelCase : List[str] = Accelerator(split_batches=UpperCamelCase_ , dispatch_batches=UpperCamelCase_ )
            if accelerator.is_local_main_process:
                print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
            test_torch_metrics(UpperCamelCase_ , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test last batch is not dropped when perfectly divisible**""" )
    _lowerCAmelCase : Optional[int] = Accelerator()
    test_torch_metrics(UpperCamelCase_ , 512 )
    accelerator.state._reset_state()
def _UpperCAmelCase (UpperCamelCase_ : List[Any] ):
    """Entry point used by ``xla_spawn`` (TPUs): ignores its index argument and runs main()."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 429 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCAmelCase : Tuple = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCAmelCase : str = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCAmelCase : Tuple = 0.4
# Just a seed to improve randomness required by the algorithm.
# NOTE(review): obfuscation collapsed the constant names (upstream: N_POPULATION,
# N_SELECTED, MUTATION_PROBABILITY) — later functions still reference those names.
random.seed(random.randint(0, 1000))
def __a ( _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] )
return (item, float(_lowercase ))
def __a ( _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Dict = random.randint(0 , len(_lowercase ) - 1 )
lowerCamelCase__ : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
lowerCamelCase__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __a ( child , genes ):
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``.

    Fixes vs. the obfuscated original: the duplicate parameter names were a
    SyntaxError and the mutated gene was assigned to a throwaway local instead
    of a position in the child.

    Returns:
        str: the (possibly mutated) child string.
    """
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        # Overwrite one random position with a random gene from the pool.
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def __a ( parent_1 , population_score , genes , ):
    """Breed children for ``parent_1``, in numbers proportional to its fitness score.

    Fixes vs. the obfuscated original: the duplicate parameter names were a
    SyntaxError and the second-parent lookup bound was lost; ``N_SELECTED``
    matches the upstream implementation.

    Args:
        parent_1: (string, normalised score) pair for the breeding parent.
        population_score: scored population to draw second parents from.
        genes: gene pool used for mutation.

    Returns:
        list[str]: the mutated children.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1, child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
def __a ( _lowercase , _lowercase , _lowercase = True ):
    """Evolve a random population toward the target string and return
    (generation, total_population, best_string).

    NOTE(review): the duplicate parameter names above are a SyntaxError as
    written and many call arguments below were collapsed to one mangled name —
    upstream this is ``basic(target, genes, debug=True)``; compare before use.
    """
    # Sanity-check the evolution hyperparameters.
    if N_POPULATION < N_SELECTED:
        lowerCamelCase__ : Optional[int] = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(_lowercase )
    # Verify that the target contains no genes besides the ones inside genes variable.
    lowerCamelCase__ : Union[str, Any] = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        lowerCamelCase__ : Optional[Any] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(_lowercase )
    # Generate random starting population.
    lowerCamelCase__ : List[str] = []
    for _ in range(_lowercase ):
        population.append(''''''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
    # Just some logs to know what the algorithms is doing.
    lowerCamelCase__ , lowerCamelCase__ : str = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(_lowercase )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        lowerCamelCase__ : Union[str, Any] = [evaluate(_lowercase , _lowercase ) for item in population]
        # Check if there is a matching evolution.
        lowerCamelCase__ : Optional[Any] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        lowerCamelCase__ : str = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(_lowercase )
        # Normalize population score to be between 0 and 1.
        lowerCamelCase__ : Optional[int] = [
            (item, score / len(_lowercase )) for item, score in population_score
        ]
        # This is selection
        for i in range(_lowercase ):
            population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(_lowercase ) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo: evolve a random population toward the target sentence and report stats.
    # NOTE(review): the names consumed below (basic, target_str, genes_list,
    # generation, population, target) were collapsed by obfuscation into
    # `UpperCAmelCase` — compare against the upstream script before running.
    UpperCAmelCase : List[str] = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    UpperCAmelCase : str = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 121 | """simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)

# Name of the SentencePiece model file inside a checkpoint directory.
UpperCAmelCase : Tuple = {"vocab_file": "spm_char.model"}

# Hub URLs of the pretrained SpeechT5 SentencePiece models.
UpperCAmelCase : Union[str, Any] = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

# Maximum positional-embedding sizes per pretrained checkpoint.
UpperCAmelCase : Tuple = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self :Union[str, Any] ,__UpperCAmelCase :Dict ,__UpperCAmelCase :List[Any]="<s>" ,__UpperCAmelCase :Optional[int]="</s>" ,__UpperCAmelCase :List[Any]="<unk>" ,__UpperCAmelCase :Tuple="<pad>" ,__UpperCAmelCase :Optional[Dict[str, Any]] = None ,**__UpperCAmelCase :Optional[int] ,) -> None:
"""simple docstring"""
lowerCamelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
lowerCamelCase__ : Union[str, Any] = vocab_file
lowerCamelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def lowercase_ ( self :Optional[int] ) -> Dict:
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase_ ( self :Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Tuple = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : List[str] = self.__dict__.copy()
lowerCamelCase__ : int = None
return state
def __setstate__( self :Union[str, Any] ,__UpperCAmelCase :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowerCamelCase__ : str = {}
lowerCamelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self :Any ,__UpperCAmelCase :str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
def lowercase_ ( self :str ,__UpperCAmelCase :List[Any] ) -> int:
"""simple docstring"""
return self.sp_model.piece_to_id(__UpperCAmelCase )
def lowercase_ ( self :Optional[Any] ,__UpperCAmelCase :Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase__ : Dict = self.sp_model.IdToPiece(__UpperCAmelCase )
return token
def lowercase_ ( self :Union[str, Any] ,__UpperCAmelCase :Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Tuple = []
lowerCamelCase__ : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCamelCase__ : Union[str, Any] = []
else:
current_sub_tokens.append(__UpperCAmelCase )
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowercase_ ( self :Tuple ,__UpperCAmelCase :int ,__UpperCAmelCase :Optional[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self :List[Any] ,__UpperCAmelCase :List[int] ,__UpperCAmelCase :Optional[List[int]] = None ,__UpperCAmelCase :bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
lowerCamelCase__ : Dict = [1]
if token_ids_a is None:
return ([0] * len(__UpperCAmelCase )) + suffix_ones
return ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def lowercase_ ( self :List[str] ,__UpperCAmelCase :str ,__UpperCAmelCase :Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase__ : List[Any] = os.path.join(
__UpperCAmelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,'''wb''' ) as fi:
lowerCamelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 121 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCAmelCase = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same obfuscated name
# ``__lowerCAmelCase``; each assignment clobbers the previous one, so only the
# final value (the max-length map) survives at module level — verify intent.
__lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCAmelCase = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
# Maximum input positions (tokens) per pretrained checkpoint.
__lowerCAmelCase = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __SCREAMING_SNAKE_CASE( ):
    """Return the GPT-2 byte -> unicode-character mapping.

    Printable bytes map to themselves; the remaining bytes are shifted to
    code points >= 256 so every byte gets a visible, reversible character.
    Fixes: the original appended/looked up the undefined name ``__a``
    instead of the loop variables, and zipped two undefined names.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(code) for code in cs]
    return dict(zip(bs, cs))
def __SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols).

    Fixes: the original raised IndexError on an empty word; an empty word
    simply has no pairs.
    """
    pairs = set()
    if not _SCREAMING_SNAKE_CASE:
        return pairs
    prev_char = _SCREAMING_SNAKE_CASE[0]
    for char in _SCREAMING_SNAKE_CASE[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _lowerCAmelCase ( lowercase__ ):
    """Byte-level BPE tokenizer for LED (derived from the BART/GPT-2 tokenizer).

    Fixes vs. the original block: every method was named ``lowercase`` (so
    only the last ``def`` survived on the class), several signatures declared
    duplicate parameter names (a SyntaxError), and bodies referenced the
    undefined name ``_a``.  Method/attribute names are restored to the
    standard ``PreTrainedTokenizer`` hooks (``_tokenize`` etc.) so the base
    class can dispatch to them.  The byte table and pair helper are inlined
    because the module-level helpers were obfuscated/shadowed.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load vocab.json / merges.txt and configure the special tokens."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        # Build the GPT-2 byte<->unicode tables locally (the module-level
        # helper was obfuscated/shadowed and cannot be referenced reliably).
        printable = (
            list(range(ord("!"), ord("~") + 1))
            + list(range(ord("¡"), ord("¬") + 1))
            + list(range(ord("®"), ord("ÿ") + 1))
        )
        codepoints = printable[:]
        shift = 0
        for byte in range(2**8):
            if byte not in printable:
                printable.append(byte)
                codepoints.append(2**8 + shift)
                shift += 1
        self.byte_encoder = dict(zip(printable, [chr(c) for c in codepoints]))
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (excludes added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full token -> id map including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    @staticmethod
    def _get_pairs(word):
        """Set of adjacent symbol pairs in *word* (a tuple of symbols)."""
        pairs = set()
        prev_char = word[0]
        for char in word[1:]:
            pairs.add((prev_char, char))
            prev_char = char
        return pairs

    def bpe(self, token):
        """Apply byte-pair merges to *token* by ascending merge rank; memoized."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = self._get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = self._get_pairs(word)
        word = " ".join(word)
        # Memoize (the obfuscated original dropped this assignment).
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Token string -> id, falling back to the unknown token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Concatenate tokens and reverse the byte-level encoding."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` and ``merges.txt`` into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!"""
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> X </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 marks special tokens, 0 marks sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED does not use token type ids; return zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word behaves like mid-text words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Base padding plus LED's ``global_attention_mask`` (padded with -1)."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase = """"""  # NOTE(review): presumably the label (.txt) directory; left empty here
__lowerCAmelCase = """"""  # NOTE(review): presumably the image directory — same obfuscated name rebinds it
__lowerCAmelCase = """"""  # NOTE(review): presumably the output directory (the ``OUTPUT_DIR`` used below is undefined)
__lowerCAmelCase = 1  # (0 is vertical, 1 is horizontal)
def UpperCAmelCase_ ():
    """Flip-augment a labelled image dataset and write the flipped copies.

    NOTE(review): this block is obfuscation-damaged — ``get_dataset``,
    ``update_image_and_anno``, ``random_chars``, ``OUTPUT_DIR`` and ``__a``
    are not defined under these names in this file, and the annotated tuple
    assignments (``_a, _a : List[str] = ...``) are not valid Python syntax.
    Restore the upstream names before use.
    """
    _a, _a : List[str] = get_dataset(__a , __a )
    print('Processing...' )
    _a, _a, _a : Optional[Any] = update_image_and_anno(__a , __a , __a )
    for index, image in enumerate(__a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        _a : int = random_chars(3_2 )
        _a : List[Any] = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        _a : Optional[Any] = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""" , __a , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f"""Success {index+1}/{len(__a )} with {file_name}""" )
        _a : Dict = []
        for anno in new_annos[index]:
            _a : Dict = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(__a )
        with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def UpperCAmelCase_ (label_dir : str , img_dir : str ):
    """Collect YOLO-format annotations and their matching image paths.

    Reads every ``*.txt`` in *label_dir*; each line is
    ``class x_center y_center width height``.  Returns ``(img_paths, labels)``
    where ``img_paths[i]`` is ``<img_dir>/<stem>.jpg``.  Label files with no
    boxes are skipped.

    Fixes: both parameters were declared with the same name ``__a`` — a
    SyntaxError — making the two directory arguments indistinguishable.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def UpperCAmelCase_ (img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip every image (and its YOLO boxes) horizontally (1) or vertically (0).

    Returns ``(new_imgs_list, new_annos_lists, path_list)``.

    Fixes: all three parameters shared the single name ``__a`` — a
    SyntaxError — and the body's obfuscated references were ambiguous.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            # Horizontal flip: x-center mirrors around 0.5 (normalized coords).
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            # Vertical flip: y-center mirrors around 0.5.
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def UpperCAmelCase_ (number_char : int = 3_2 ):
    """Return a random lowercase-alphanumeric string of length *number_char*.

    Fixes: input validation used ``assert`` (silently stripped under ``-O``);
    it now raises ``ValueError`` with the original message.
    """
    if number_char <= 1:
        raise ValueError("The number of character should greater than 1")
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    main()  # NOTE(review): no ``main`` is defined in this file — presumably the flip routine above; confirm
    print("""DONE ✅""")
| 229 | 0 |
from __future__ import annotations
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
    """Solve an n x n maze from (0, 0) to (n-1, n-1); print the path matrix.

    Fixes: printing used the whole maze argument instead of each solution
    row, and the solver was invoked under the undefined name ``run_maze``
    (in this file the recursive solver is the *next* definition, which
    rebinds this same obfuscated name).
    """
    size = len(_lowerCAmelCase )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    # ``__UpperCamelCase`` resolves at call time to the last module-level
    # binding of that name, i.e. the recursive solver defined below.
    solved = __UpperCamelCase(_lowerCAmelCase , 0 , 0 , solutions )
    if solved:
        print("""\n""".join(str(row ) for row in solutions ) )
    else:
        print("""No solution exists!""" )
    return solved
def __UpperCamelCase ( maze , i , j , solutions ) -> bool:
    """Depth-first search step: try to extend the maze path at cell (i, j).

    Marks visited cells with 1 in *solutions* and resets them to 0 on
    backtracking.  Returns True once (size-1, size-1) is reached.

    Fixes: all four parameters were declared with the single name
    ``_lowerCAmelCase`` (a SyntaxError) and the recursive calls targeted the
    undefined name ``run_maze``.
    """
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark visited
            solutions[i][j] = 1
            # explore the four directions; undo the mark if all fail
            if (
                __UpperCamelCase(maze , i + 1 , j , solutions )
                or __UpperCamelCase(maze , i , j + 1 , solutions )
                or __UpperCamelCase(maze , i - 1 , j , solutions )
                or __UpperCamelCase(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
    import doctest
    doctest.testmod()  # no doctests are present in this module, so this is currently a no-op
| 520 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Configuration fixture for the image-processor tests below.

    Fixes vs. the original: ``__init__`` declared every parameter with the
    same name (``lowerCamelCase__`` repeated — a SyntaxError) and assigned
    each value to a local ``A`` instead of ``self``, so no attribute was
    ever set.  Parameter names and defaults are restored from the values the
    original body referenced.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Default to an 18x18 target size when none is supplied.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        # Avoid mutable default arguments: fall back to the upstream constants.
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std

    def _lowerCAmelCase(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Image-processor test suite covering PIL, numpy and torch inputs.

    NOTE(review): obfuscation damage — the base list names this class's own
    previous binding, every method is named ``_lowerCAmelCase`` (later defs
    clobber earlier ones, and unittest only collects ``test_*`` names),
    ``EfficientFormerImageProcessorTester`` and ``lowerCamelCase__`` are
    undefined here, and the setup method binds the tester to a local ``A``
    rather than ``self.image_proc_tester``.  Restore upstream names before
    relying on this suite.
    """
    __lowerCamelCase : List[str] = ViTImageProcessor if is_vision_available() else None
    def _lowerCAmelCase ( self ):
        # Intended as ``setUp``: build the shared tester fixture.
        A : List[str] = EfficientFormerImageProcessorTester(self )
    @property
    def _lowerCAmelCase ( self ):
        # kwargs dict used to construct the image processor under test.
        return self.image_proc_tester.prepare_image_processor_dict()
    def _lowerCAmelCase ( self ):
        # The processor should expose every configured attribute.
        A : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase__, """image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase__, """image_std""" ) )
        self.assertTrue(hasattr(lowerCamelCase__, """do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase__, """do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase__, """size""" ) )
    def _lowerCAmelCase ( self ):
        pass
    def _lowerCAmelCase ( self ):
        # Initialize image_processor
        A : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A : List[Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase__, Image.Image )
        # Test not batched input
        A : List[str] = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
        # Test batched
        A : Dict = image_processor(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
    def _lowerCAmelCase ( self ):
        # Initialize image_processor
        A : int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A : str = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase__, numpify=lowerCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase__, np.ndarray )
        # Test not batched input
        A : Union[str, Any] = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
        # Test batched
        A : Any = image_processor(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
    def _lowerCAmelCase ( self ):
        # Initialize image_processor
        A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A : Optional[Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase__, torchify=lowerCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase__, torch.Tensor )
        # Test not batched input
        A : str = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
        # Test batched
        A : Tuple = image_processor(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
| 520 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
def snake_case ( a_ : str ) -> dict:
    """Fetch one Hacker News item (by id) from the public Firebase API.

    Fixes: the URL f-string referenced the undefined name ``story_id``
    instead of the (obfuscated) parameter, raising NameError at runtime.
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{a_}.json?print=pretty"
    return requests.get(url ).json()
def snake_case ( a_ : int = 10 ) -> list[dict]:
    """Fetch the ids of the current top stories and resolve each to its item.

    NOTE(review): obfuscation damage — ``get_hackernews_story``,
    ``max_stories`` and ``story_ids`` are not defined under these names in
    this file (all three functions here were renamed to ``snake_case``), and
    ``requests.get(a_)`` passes the count instead of the URL.  Restore the
    upstream helper names before use.
    """
    UpperCamelCase_ : Any = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
    UpperCamelCase_ : Dict = requests.get(a_ ).json()[:max_stories]
    return [get_hackernews_story(a_ ) for story_id in story_ids]
def snake_case ( a_ : int = 10 ) -> str:
    """Render the top stories as a markdown bullet list.

    NOTE(review): obfuscation damage — ``hackernews_top_stories``,
    ``stories`` and ``hackernews_top_stories_as_markdown`` are not defined
    under these names in this file; restore the upstream names before use.
    """
    UpperCamelCase_ : List[str] = hackernews_top_stories(a_ )
    return "\n".join("""* [{title}]({url})""".format(**a_ ) for story in stories )
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 208 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase =logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
    """Kaldi-style log-mel filter-bank speech feature extractor with optional
    per-utterance cepstral mean/variance normalization (CMVN).

    NOTE(review): obfuscation damage — the base name ``SCREAMING_SNAKE_CASE__``
    is undefined in this file (upstream this derives from
    ``SequenceFeatureExtractor``), ``__init__`` and the static helper repeat
    one parameter name (a SyntaxError), the real config names
    (``num_mel_bins`` etc.) are referenced but never bound, and all helper
    methods share the name ``_UpperCAmelCase`` so only the last ``def``
    survives on the class.  Restore the upstream names before use.
    """
    __a : List[Any] = ['''input_features''', '''attention_mask''']
    def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=1_60_00 , __lowerCAmelCase=80 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ):
        super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase )
        UpperCamelCase_ : int = num_mel_bins
        UpperCamelCase_ : Union[str, Any] = do_ceptral_normalize
        UpperCamelCase_ : Any = normalize_means
        UpperCamelCase_ : int = normalize_vars
        UpperCamelCase_ : int = True
    def _UpperCAmelCase ( self , __lowerCAmelCase , ):
        # Intended as ``_extract_fbank_features``: run Kaldi fbank on one waveform.
        UpperCamelCase_ : Optional[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
        UpperCamelCase_ : Optional[int] = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 )
        UpperCamelCase_ : Tuple = ta_kaldi.fbank(__lowerCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def _UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 0.0 , ):
        # Intended as ``utterance_cmvn``: mean/variance-normalize one utterance.
        # make sure we normalize float32 arrays
        if normalize_means:
            UpperCamelCase_ : int = x[:input_length].mean(axis=0 )
            UpperCamelCase_ : str = np.subtract(__lowerCAmelCase , __lowerCAmelCase )
        if normalize_vars:
            UpperCamelCase_ : Optional[int] = x[:input_length].std(axis=0 )
            UpperCamelCase_ : Optional[Any] = np.divide(__lowerCAmelCase , __lowerCAmelCase )
        if input_length < x.shape[0]:
            UpperCamelCase_ : Any = padding_value
        # make sure array is in float32
        UpperCamelCase_ : str = x.astype(np.floataa )
        return x
    def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        # Intended as ``normalize``: apply CMVN to every padded feature array.
        UpperCamelCase_ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(__lowerCAmelCase , __lowerCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )
        ]
    def __call__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        # Featurize raw speech: validate sampling rate, batch, extract fbank,
        # pad, optionally CMVN-normalize, and convert to the requested tensors.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        UpperCamelCase_ : Optional[int] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        UpperCamelCase_ : List[str] = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCamelCase_ : str = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            UpperCamelCase_ : Optional[Any] = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCamelCase_ : int = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCamelCase_ : Tuple = [raw_speech]
        # extract fbank features
        UpperCamelCase_ : Any = [self._extract_fbank_features(__lowerCAmelCase ) for waveform in raw_speech]
        # convert into correct format for padding
        UpperCamelCase_ : Tuple = BatchFeature({"""input_features""": features} )
        UpperCamelCase_ : Tuple = self.pad(
            __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
        # make sure list is in array format
        UpperCamelCase_ : str = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , __lowerCAmelCase ):
            UpperCamelCase_ : Any = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
        UpperCamelCase_ : Union[str, Any] = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            UpperCamelCase_ : Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            UpperCamelCase_ : List[str] = (
                np.array(__lowerCAmelCase , dtype=np.intaa )
                if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            UpperCamelCase_ : Optional[int] = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase )
        if return_tensors is not None:
            UpperCamelCase_ : Optional[Any] = padded_inputs.convert_to_tensors(__lowerCAmelCase )
        return padded_inputs
| 208 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase_ : str = logging.getLogger()
def _lowerCAmelCase(path : Path , lines : list ) -> None:
    """Join *lines* with newlines and write them to *path*.

    Fixes: both parameters were named ``a`` (a SyntaxError), and the file
    handle opened via ``Path.open`` was never closed; ``Path.write_text``
    writes and closes in one step.  The ``-> str`` annotation was also
    wrong — nothing is returned.
    """
    Path(path ).write_text('\n'.join(lines ) )
UpperCAmelCase_ : Any = '''patrickvonplaten/t5-tiny-random'''  # NOTE(review): presumably T5_TINY; every constant here rebinds the same obfuscated name
UpperCAmelCase_ : str = '''sshleifer/bart-tiny-random'''  # presumably BART_TINY
UpperCAmelCase_ : str = '''sshleifer/tiny-mbart'''  # presumably MBART_TINY
UpperCAmelCase_ : Any = logging.StreamHandler(sys.stdout)  # presumably stream_handler
logger.addHandler(stream_handler)  # NOTE(review): ``stream_handler`` is undefined under that name — obfuscation damage
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __UpperCAmelCase ( lowercase_ ):
    """End-to-end tests for the seq2seq ``run_eval.py`` / ``run_eval_search.py``
    example scripts, using tiny T5/BART/MBART checkpoints.

    NOTE(review): obfuscation damage — the base name ``lowercase_`` is
    undefined in this file (upstream: ``TestCasePlus``), all four methods
    share the name ``UpperCamelCase_`` (later defs clobber earlier ones),
    and the bodies reference ``run_eval_tester`` / ``UpperCamelCase__`` /
    ``model`` and friends, none of which are bound here.  Restore the
    upstream names before running.
    """
    def UpperCamelCase_ ( self , _A ):
        """Run ``run_eval`` on a one-line source file and check output is written."""
        _SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        _SCREAMING_SNAKE_CASE =input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        _SCREAMING_SNAKE_CASE =[''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
        _dump_articles(UpperCamelCase__ , UpperCamelCase__ )
        _SCREAMING_SNAKE_CASE =str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
        _SCREAMING_SNAKE_CASE ='''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        _SCREAMING_SNAKE_CASE =f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
            run_generate()
            assert Path(UpperCamelCase__ ).exists()
            # os.remove(Path(output_file_name))
    def UpperCamelCase_ ( self ):
        """Smoke-test the default (T5-tiny) evaluation path."""
        self.run_eval_tester(UpperCamelCase__ )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def UpperCamelCase_ ( self , _A ):
        """Parameterized evaluation over the tiny BART/MBART checkpoints."""
        self.run_eval_tester(UpperCamelCase__ )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def UpperCamelCase_ ( self , _A ):
        """Run ``run_eval_search`` over a small parameter grid and check its report."""
        _SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        _SCREAMING_SNAKE_CASE =input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        _SCREAMING_SNAKE_CASE ={
            '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
            '''de''': [
                '''Maschinelles Lernen ist großartig, oder?''',
                '''Ich esse gerne Bananen''',
                '''Morgen ist wieder ein toller Tag!''',
            ],
        }
        _SCREAMING_SNAKE_CASE =Path(self.get_auto_remove_tmp_dir() )
        _SCREAMING_SNAKE_CASE =str(tmp_dir / '''scores.json''' )
        _SCREAMING_SNAKE_CASE =str(tmp_dir / '''val.target''' )
        _dump_articles(UpperCamelCase__ , text['''en'''] )
        _dump_articles(UpperCamelCase__ , text['''de'''] )
        _SCREAMING_SNAKE_CASE ='''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        _SCREAMING_SNAKE_CASE =f"""
            run_eval_search.py
            {model}
            {str(UpperCamelCase__ )}
            {str(UpperCamelCase__ )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
        with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
            with CaptureStdout() as cs:
                run_search()
            _SCREAMING_SNAKE_CASE =[''' num_beams | length_penalty''', model, '''Best score args''']
            _SCREAMING_SNAKE_CASE =['''Info''']
            if "translation" in task:
                expected_strings.append('''bleu''' )
            else:
                expected_strings.extend(UpperCamelCase__ )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(UpperCamelCase__ ).exists()
            os.remove(Path(UpperCamelCase__ ) )
| 711 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : List[Any] = 2_5_6  # NOTE(review): rebinds the same obfuscated name as the logger above; presumably TARGET_FEATURE_LENGTH
class __UpperCAmelCase ( _lowerCamelCase ):
    """Spectrogram-diffusion audio pipeline (note encoder + continuous encoder +
    T5-film decoder + MelGAN vocoder).

    NOTE(review): the obfuscated original bound every assignment to the
    placeholder `_SCREAMING_SNAKE_CASE`, collapsed all helper methods onto the
    single name `UpperCamelCase_`, and duplicated the `_A` parameter names (a
    SyntaxError). Method and variable names are restored here from the call
    sites inside `__call__` (`scale_features`, `scale_to_features`, `encode`,
    `decode`); confirm against the upstream pipeline before relying on them.
    """

    # Presumably the pipeline's optional components (melgan may be None) —
    # TODO confirm against the base class contract.
    lowercase : Any = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        """Register sub-models. Parameter names are taken from the
        `register_modules` keywords below."""
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map `features` from [min_value, max_value] to `output_range`,
        optionally clipping to the valid feature range first."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of `scale_features`: map network outputs in `input_range`
        back to the feature range [min_value, max_value]."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders; token positions > 0 are treated as valid."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Predict decoder logits for `input_tokens` at diffusion time
        `noise_time` (scalar or tensor)."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        """Generate audio chunk-by-chunk from `input_tokens`.

        Parameter names/defaults restored from their keyword uses in the body.
        Returns an `AudioPipelineOutput` (or a 1-tuple when
        `return_dict=False`).
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}."""
            )

        # `np.floataa` in the obfuscated source does not exist; restore float32.
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('''Generated segment''', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.'''
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.'''
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
# (stray dataset-row delimiter, commented out so the file parses: | 165 | 0 |)
from __future__ import annotations
from math import pi, sqrt
def a_(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit.

    The obfuscated original declared `__lowercase` twice in the parameter list
    (a SyntaxError) while the body read `inductance` and `capacitance`; those
    names are restored.

    Args:
        inductance: coil inductance in henries; must be > 0.
        capacitance: capacitor capacitance in farads; must be > 0.

    Returns:
        A ("Resonant frequency", value) tuple with value = 1 / (2*pi*sqrt(L*C)).

    Raises:
        ValueError: if either argument is zero or negative.
    """
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative')
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
    # Run the module doctests when executed as a script. The original final
    # line carried stray table-markup residue ("| 686 |") fused after the
    # call, which made the file a SyntaxError; the call is restored cleanly.
    import doctest

    doctest.testmod()
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """PyTorch-Lightning module that fine-tunes a transformer on a GLUE task.

    NOTE(review): the obfuscated original named this class
    `SCREAMING_SNAKE_CASE__` while `main()` below instantiates
    `GLUETransformer`, collapsed every method onto the single name `A`, and
    duplicated parameter names (a SyntaxError). Names are restored from the
    call sites and the Lightning hook contract — confirm against the upstream
    example before relying on them.
    """

    # Task mode handed to BaseTransformer via super().__init__ below.
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        # Cache the task's output mode on hparams; read later as
        # `self.hparams.glue_output_mode`.
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}

        # distilbert/bart take no token_type_ids; only BERT-family models use them.
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Build (or load cached) features for the train and dev splits."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load cached features for `mode` and wrap them in a DataLoader."""
        # The "test" split is evaluated on dev features (no test-set labels).
        mode = 'dev' if mode == 'test' else mode

        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate per-batch validation outputs into metrics."""
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add GLUE-specific CLI flags on top of BaseTransformer's."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length',
            default=128,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument(
            '--task', default='', type=str, required=True, help='The GLUE task to run',
        )
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none',
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )
        return parser
def main():
    """Parse CLI args, train a GLUETransformer, and optionally test the last
    checkpoint on the dev set.

    NOTE(review): the obfuscated original named this `a_` while the
    `__main__` guard below calls `main()`; placeholder locals (`_snake_case`)
    and the undefined `__lowercase` arguments are restored from their uses.
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results',
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
    # The original line carried stray table-markup residue ("| 686 | 1 |")
    # fused onto the call, which made the file a SyntaxError; call cleanly.
    main()
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (seconds) as (h):mm:ss, dropping the hours when zero.

    NOTE(review): the obfuscated original named this `UpperCAmelCase` and read
    the undefined `__snake_case`; the classes below call `format_time(t)`, so
    that name and the `t` parameter are restored.
    """
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def html_progress_bar(value, total, prefix, label, width=300):
    """Return the HTML for a `value`/`total` progress bar with `prefix` before
    it and `label` after it.

    NOTE(review): name and parameters restored from the call sites in the
    progress-bar classes below; the HTML template string is kept verbatim.
    """
    return f'''\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '''
def text_to_html_table(items):
    """Put the texts in `items` (first row = headers) in an HTML table.

    NOTE(review): name restored from the call site in the tracker below;
    floats are rendered with six decimals, everything else via str().
    """
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """ <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f''' <th>{i}</th>\n'''
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt, float) else str(elt)
            html_code += f''' <td>{elt}</td>\n'''
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """Jupyter-friendly progress bar rendered as HTML via IPython display.

    NOTE(review): the obfuscated original named this `__snake_case`, bound
    attributes to the placeholder `lowerCamelCase`, and collapsed all methods
    onto `UpperCAmelCase_`. The class name is restored from its uses below
    (`NotebookProgressBar(...)` in `add_child` / the callback); attribute and
    method names are restored from their reads (`self.update_bar`,
    `self.display`, `self.close`, `self.total`, ...).
    """

    # Number of initial calls during which updates are always rendered.
    warmup = 5
    # Minimum seconds between two renders once timing data is available.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        """Record progress `value` and re-render when warranted (first calls,
        forced updates, or enough progress/time elapsed)."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twixe with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild `self.label` for `value` and trigger a render."""
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            self.label = (
                f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                f''' {format_time(self.predicted_remaining)}'''
            )
            self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f''', {self.comment}]'''
        self.display()

    def display(self):
        """Render (or update) the bar's HTML in the notebook output area."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the rendered bar (only when this is a top-level bar)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class NotebookTrainingTracker(NotebookProgressBar):
    """Progress bar plus a metrics table (and optional child bar) for training.

    NOTE(review): the obfuscated original named this `__snake_case` with the
    undefined base `a__`; the base is restored to `NotebookProgressBar` (it
    calls `super().__init__` with a step count and relies on its attributes),
    and the class/method names from their call sites in the callback below.
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        # First row of the metrics table; None until the first write_line.
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """Render the bar, the metrics table, and any child bar's HTML."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append one row (dict of column -> value) to the metrics table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Create (and return) a child bar rendered beneath this tracker."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Drop the child bar and re-render without it."""
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """TrainerCallback that renders training/evaluation progress in notebooks.

    NOTE(review): the obfuscated original named this `__snake_case` with the
    undefined base `a__`; the base is restored to the imported
    `TrainerCallback`, attribute names from their reads
    (`self.training_tracker`, `self.prediction_bar`, `self._force_next_update`
    — the last appears literally at the on_step_end call site), and hook names
    from the TrainerCallback contract.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        # Show the epoch as an int when it is whole, otherwise with 2 decimals.
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1,
            comment=f'''Epoch {epoch}/{state.num_train_epochs}''',
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only log the training loss here when there is no evaluation loop.
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            # Most recent logged training loss, if any.
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break

            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(r'\_loss$', '', k)
            # Drop bookkeeping metrics that shouldn't appear in the table.
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(f'''{metric_key_prefix}_runtime''', None)
            _ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''', None)
            _ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''', None)
            _ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''', None)
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f'''Epoch {int(state.epoch)}/{state.num_train_epochs}''',
            force_update=True,
        )
        self.training_tracker = None
# (stray dataset-row delimiter, commented out so the file parses: | 707 |)
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison operators used by `require_version` below. The obfuscated source
# bound this table to the placeholder `A`, but it is read as `ops`
# (`ops[op]`, `op not in ops`); restore the intended name.
ops = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
}
def UpperCAmelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''')
if not ops[op](version.parse(UpperCAmelCase__) , version.parse(UpperCAmelCase__)):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''')
def require_version(requirement: str, hint: Optional[str] = None):
    """Check that a pip-style `requirement` (e.g. "tokenizers==0.9.4") is met.

    NOTE(review): placeholder local names (`lowerCamelCase`) are restored from
    their uses (`pkg`, `op`, `want_ver`, `wanted`, `got_ver`, ...), and the
    function name from the call in the core wrapper below.

    Args:
        requirement: bare package name or "pkg<op>version[,<op>version...]".
        hint: extra text appended to error messages (e.g. how to install).

    Raises:
        ValueError: malformed requirement or unsupported operator.
        importlib.metadata.PackageNotFoundError: package is not installed.
        ImportError: installed version does not satisfy the requirement.
    """
    hint = f'''\n{hint}''' if hint is not None else ''

    # non-versioned check
    if re.match(r'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                f''' got {requirement}'''
            )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    f''' but got {requirement}'''
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f'''{requirement}: need one of {list(ops.keys())}, but got {op}''')

    # special case: compare against the running interpreter version
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}'''
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """`require_version` wrapper which emits a core-specific hint on failure.

    NOTE(review): the obfuscated original bound `hint` to a placeholder and
    then passed the requirement twice; the hint is now threaded through as the
    second argument, matching `require_version`'s signature.
    """
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(requirement, hint)
# (stray dataset-row delimiter, commented out so the file parses: | 449 | 0 |)
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_(TestCase):
    """Unit tests for `TypedSequence` <-> pyarrow type inference.

    NOTE(review): the obfuscated original collapsed all methods onto
    `_snake_case` (so only the last survived) and replaced the expected
    exception types with the undefined `__UpperCamelCase`. Method names are
    restored to pytest-collectable `test_*` names; `pa.intaa` is restored to
    `pa.int64()`/`pa.int32()` from the paired `Value(...)` types, and the
    forbidden-combination exceptions are presumed to be ValueError — confirm
    against the upstream datasets test suite. `ArrayaD` /
    `ArrayaDExtensionType` are kept as imported at the top of this file.
    """

    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        # Passing a raw pyarrow type alongside a TypedSequence is forbidden.
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        # try_type silently falls back to the inferred type on mismatch.
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=ArrayaD((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_image(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(stream, expected_num_chunks):
    """Assert that an Arrow IPC `stream` (buffer or path) contains the two
    expected rows split into `expected_num_chunks` record batches.

    NOTE(review): the obfuscated original duplicated `_A` in the parameter
    list (a SyntaxError); names restored from the keyword use
    `expected_num_chunks=` at the call sites below.
    """
    stream = pa.BufferReader(stream) if isinstance(stream, pa.Buffer) else pa.memory_map(stream)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    # NOTE(review): both dicts were mangled to `pa.intaa()`; presumed int64
    # and int32 per the upstream test — confirm.
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """Write two rows with/without an explicit schema and check the output."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        # Default inferred schema when none was supplied.
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """Writing with explicit Features must preserve schema and metadata."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    # Round-trip: the Features must be recoverable from the written schema.
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """A non-hashable/invalid key type must raise InvalidKeyError.

    NOTE(review): the mangled `pytest.raises(_A)` is restored to
    `InvalidKeyError` (imported at the top of this file) since the key is a
    list; `check_duplicates=_A` is restored to True, which is what makes
    keys checked at all.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] ) | 555 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(3 )
_UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 555 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a ( datasets.BeamBasedBuilder):
"""simple docstring"""
def lowercase__ ( self : Any )->List[str]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=A__ , )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] )->Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def lowercase__ ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : Dict )->Tuple:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
class _a ( datasets.BeamBasedBuilder):
"""simple docstring"""
def lowercase__ ( self : int )->Union[str, Any]:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=A__ , )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : str )->List[str]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def lowercase__ ( self : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] )->Any:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def lowercase ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def lowercase ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class _a ( lowerCAmelCase):
"""simple docstring"""
@require_beam
def lowercase__ ( self : Optional[int] )->Dict:
_UpperCAmelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase = DummyBeamDataset(cache_dir=A__ , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , '''default''' , '''0.0.0''' , F'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_UpperCAmelCase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , A__ )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , A__ )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase__ ( self : List[str] )->Dict:
import apache_beam as beam
_UpperCAmelCase = beam.io.parquetio.WriteToParquet
_UpperCAmelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase = DummyBeamDataset(cache_dir=A__ , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
_UpperCAmelCase = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , '''default''' , '''0.0.0''' , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , '''default''' , '''0.0.0''' , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_UpperCAmelCase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , A__ )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase__ ( self : str )->Any:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase = NestedBeamDataset(cache_dir=A__ , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , '''default''' , '''0.0.0''' , F'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
_UpperCAmelCase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , A__ )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , A__ )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 720 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : List[str] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Optional[Any] , )->Any:
super().__init__(
__UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths}
_UpperCAmelCase = Text(
cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , **__UpperCamelCase , )
def lowercase__ ( self : Optional[Any] )->str:
# Build iterable dataset
if self.streaming:
_UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , )
_UpperCAmelCase = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory )
return dataset
| 95 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Locate the repository root (three parent hops up from this test file) and
# make the repo's ``utils`` directory importable so ``check_copies`` resolves.
# NOTE: the name ``git_repo_path`` is required by the sys.path line below and
# by the setUp fixture that copies modeling_bert.py out of the source tree.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_copies # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for the ``check_copies`` repo-consistency utility.

    ``setUp`` copies ``modeling_bert.py`` into a temporary tree and points
    ``check_copies`` at that sandbox, so the consistency checks never touch
    (or rewrite) the real source tree.
    """

    def setUp(self):
        # Build a sandboxed "src/transformers"-like tree containing only
        # modeling_bert.py, and redirect check_copies to it.
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, '''models/bert/'''))
        # NOTE(review): redirects the module-level path used by check_copies;
        # confirm the exact global name (``TRANSFORMERS_PATH``) against
        # utils/check_copies.py.
        check_copies.TRANSFORMERS_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/transformers/models/bert/modeling_bert.py'''),
            os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py'''),
        )

    def tearDown(self):
        # Restore the module-level path before removing the sandbox.
        check_copies.TRANSFORMERS_PATH = '''src/transformers'''
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write ``class_code`` under ``comment`` to a temp file and assert that
        ``check_copies.is_copy_consistent`` accepts it.

        When ``overwrite_result`` is given, run the checker in overwrite mode
        instead and assert the file was rewritten to the expected text.
        """
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        # Format exactly like check_copies does, so whitespace differences do
        # not produce false positives.
        # NOTE(review): the target version identifier was mangled in this copy
        # (``PYaa``); PY35 matches upstream usage — confirm.
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, '''new_code.py''')
        with open(fname, '''w''', newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            # No expected rewrite: the copy must already be consistent.
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''') as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''',
            '''BertLMPredictionHead''',
            REFERENCE_CODE + '''\n''',
        )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''',
            '''BertLMPredictionHead''',
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''',
            '''TestModelLMPredictionHead''',
            re.sub('''Bert''', '''TestModel''', REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}',
            f'{long_class_name}LMPredictionHead',
            re.sub('''Bert''', long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''',
            '''TestModelLMPredictionHead''',
            REFERENCE_CODE,
            overwrite_result=re.sub('''Bert''', '''TestModel''', REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']

        md_list = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
            ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
            ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
            ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
            ''' Luong, Quoc V. Le, Christopher D. Manning.'''
        )
        localized_md_list = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        converted_md_list_sample = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
            ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
            ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
            ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
            ''' Christopher D. Manning 发布。\n'''
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['''format_model_list'''])

        # The localized list is missing two models, so the counts differ and
        # the converted output must match the expected localized sample.
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['''format_model_list'''])

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
        )
        link_unchanged_md_list = (
            '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
            ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        converted_md_list_sample = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme['''format_model_list'''])

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 94 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # SentencePiece is unavailable: expose a placeholder so that the class
    # attribute ``slow_tokenizer_class`` below can still be referenced.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# File names expected inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

# Canonical download locations for the reference XLNet checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

# ``None`` means no fixed positional limit for these checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# SentencePiece's word-initial marker character.
SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) XLNet tokenizer.

    Configuration attributes below follow the ``PreTrainedTokenizerFast``
    contract: they tell the base class which files a checkpoint contains,
    where the reference checkpoints live, how to pad (XLNet pads on the
    left), and which slow tokenizer class mirrors this one.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer
def __init__( self : int , UpperCAmelCase : Dict=None , UpperCAmelCase : str=None , UpperCAmelCase : str=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Optional[Any]="<sep>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Optional[Any]="<cls>" , UpperCAmelCase : Dict="<mask>" , UpperCAmelCase : int=["<eop>", "<eod>"] , **UpperCAmelCase : List[Any] , ) -> List[str]:
'''simple docstring'''
lowercase : Dict =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
lowercase : Tuple =3
lowercase : Union[str, Any] =do_lower_case
lowercase : Any =remove_space
lowercase : int =keep_accents
lowercase : int =vocab_file
lowercase : Union[str, Any] =False if not self.vocab_file else True
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Union[str, Any] =[self.sep_token_id]
lowercase : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[int] =[self.sep_token_id]
lowercase : Union[str, Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 94 | 1 |
"""simple docstring"""
from PIL import Image
def change_contrast(img, level):
    """Return a copy of *img* with its contrast adjusted by *level*.

    *level* ranges over roughly -255..255; 0 leaves pixel values unchanged.
    Uses the standard contrast-correction factor and remaps each channel
    value around the 128 midpoint via ``Image.point``.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Remap one channel value; 128 is the fixed point of the mapping."""
        return int(128 + factor * (c - 128))

    # `point` applies `contrast` to every channel value of the image.
    return img.point(contrast)
if __name__ == "__main__":
    # Demo: load an image, raise its contrast, and save the result.
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
    cont_img.save("image_data/lena_high_contrast.png", format="png")
| 700 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted configuration files.
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase(PretrainedConfig):
    r"""
    Configuration class for UniSpeech models.

    Instantiating a configuration with the defaults yields a configuration
    similar to the `microsoft/unispeech-large-1500h-cv` architecture.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv-layer lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the conv feature extractor (input samples per output frame)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 616 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCAmelCase__ : int = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """Lightning module that fine-tunes a sequence-classification model on a GLUE task."""

    mode = "sequence-classification"

    def __init__(self, hparams):
        """Resolve the task's output mode / label count, then defer to BaseTransformer."""
        if isinstance(hparams, dict):
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        """One optimization step; returns the loss plus tensorboard logging info."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            # Only some architectures consume token_type_ids.
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Convert the raw GLUE data into cached feature files (once per mode)."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Build a DataLoader over the cached features for *mode*."""
        mode = "dev" if mode == "test" else mode  # the dev set doubles as the test set

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        """Forward pass without optimization; collect loss, predictions and targets."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        """Aggregate per-batch validation outputs into GLUE metrics."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add GLUE-specific CLI arguments on top of BaseTransformer's."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    """Parse CLI args, train a GLUETransformer, and optionally test the best checkpoint."""
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Checkpoints are saved by epoch; the last (sorted) one is the newest.
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
    # Script entry point: train (and optionally evaluate) a GLUE sequence-classification model.
    main()
| 347 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase__ : int = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) pixel box to the 0-1000 grid LayoutLM expects."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on *image* and return (words, normalized bounding boxes)."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set -> O(1) membership below)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a(BaseImageProcessor):
    r"""
    Image processor for document-understanding models: resizes, rescales and
    normalizes images, and optionally runs Tesseract OCR to extract words plus
    0-1000-normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to ``(size['height'], size['width'])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level image_transforms function, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 347 | 1 |
from __future__ import annotations
lowercase__ :str = tuple[int, int, int]
lowercase__ :Optional[int] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowercase__ :int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
lowercase__ :Tuple = "EGZWVONAHDCLFQMSIPJBYUKXTR"
lowercase__ :Dict = "FOBHMDKEXQNRAULPGSJVTYICZW"
lowercase__ :int = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
lowercase__ :Tuple = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
lowercase__ :List[str] = "RMDJXFUWGISLHVTCQNKYPBEZOA"
lowercase__ :str = "SGLCPQWZHKXAREONTFBVIYJUDM"
lowercase__ :int = "HVSICLTYKQUBXDWAJZOMFGPREN"
lowercase__ :List[str] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
lowercase__ :Union[str, Any] = "LFKIJODBEGAMQPXVUHYSTCZRWN"
lowercase__ :Tuple = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(lowerCAmelCase__ ) )) < 3:
lowercase = f'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(lowerCAmelCase__ )
# Checks if rotor positions are valid
lowercase , lowercase , lowercase = rotpos
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
lowercase = f'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
lowercase = f'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
lowercase = f'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(lowerCAmelCase__ )
# Validates string and returns dict
lowercase = _plugboard(lowerCAmelCase__ )
return rotpos, rotsel, pbdict
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = f'Plugboard setting isn\'t type string ({type(lowerCAmelCase__ )})'
raise TypeError(lowerCAmelCase__ )
elif len(lowerCAmelCase__ ) % 2 != 0:
lowercase = f'Odd number of symbols ({len(lowerCAmelCase__ )})'
raise Exception(lowerCAmelCase__ )
elif pbstring == "":
return {}
pbstring.replace(''' ''' , '''''' )
# Checks if all characters are unique
lowercase = set()
for i in pbstring:
if i not in abc:
lowercase = f'\'{i}\' not in list of symbols'
raise Exception(lowerCAmelCase__ )
elif i in tmppbl:
lowercase = f'Duplicate symbol ({i})'
raise Exception(lowerCAmelCase__ )
else:
tmppbl.add(lowerCAmelCase__ )
del tmppbl
# Created the dictionary
lowercase = {}
for j in range(0 , len(lowerCAmelCase__ ) - 1 , 2 ):
lowercase = pbstring[j + 1]
lowercase = pbstring[j]
return pb
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (rotora, rotora, rotora) , lowerCAmelCase__ = "" , ):
'''simple docstring'''
lowercase = text.upper()
lowercase , lowercase , lowercase = _validator(
lowerCAmelCase__ , lowerCAmelCase__ , plugb.upper() )
lowercase , lowercase , lowercase = rotor_position
lowercase , lowercase , lowercase = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowercase = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowercase = plugboard[symbol]
# rotor ra --------------------------
lowercase = abc.index(lowerCAmelCase__ ) + rotorposa
lowercase = rotora[index % len(lowerCAmelCase__ )]
# rotor rb --------------------------
lowercase = abc.index(lowerCAmelCase__ ) + rotorposa
lowercase = rotora[index % len(lowerCAmelCase__ )]
# rotor rc --------------------------
lowercase = abc.index(lowerCAmelCase__ ) + rotorposa
lowercase = rotora[index % len(lowerCAmelCase__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowercase = reflector[symbol]
# 2nd rotors
lowercase = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
lowercase = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
lowercase = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowercase = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowercase = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowercase = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowercase = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ :int = "This is my Python script that emulates the Enigma machine from WWII."
lowercase__ :List[str] = (1, 1, 1)
lowercase__ :Any = "pictures"
lowercase__ :str = (rotora, rotora, rotora)
lowercase__ :Union[str, Any] = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 633 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint from disk and convert it to an HF model.

    Reads the fairseq ``model.pt``, strips bookkeeping keys, derives a config
    from the stored training args and returns a loaded
    ``MaMaaaForConditionalGeneration``.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    # older checkpoints store hyper-parameters under "args", newer under "cfg"
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    # the shared embedding is tied to the decoder token embedding
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the popped fairseq-only keys have no HF counterpart
    model.model.load_state_dict(state_dict, strict=False)
    # tie the LM head to the shared embedding
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # NOTE(review): original read `args.fairseq_pathß` (stray ß) — fixed to match
    # the argument declared above.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 633 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCAmelCase__ ( _lowerCamelCase ):
    """Reusable harness that exercises a ``PretrainedConfig`` subclass.

    ``parent`` is the ``unittest.TestCase`` that supplies the assertion methods,
    ``config_class`` is the config class under test, and the remaining keyword
    arguments become ``inputs_dict`` — the kwargs used to instantiate configs.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        # NOTE(review): the obfuscated original repeated one parameter name five
        # times (a SyntaxError) and never stored the values on self, although
        # every method below reads self.parent / self.config_class /
        # self.inputs_dict / self.common_properties / self.has_text_modality.
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Check that the standard properties exist, can be set, and can be passed as kwargs."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """Round-trip the config through ``to_json_string``."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Round-trip the config through ``to_json_file`` / ``from_json_file``."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip the config through ``save_pretrained`` / ``from_pretrained``."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Same round-trip, but saving into / loading from a subfolder."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """`num_labels` must resize the id2label / label2id mappings."""
        # NOTE(review): attribute names were digit-corrupted to idalabel/labelaid;
        # the real PretrainedConfig attributes are id2label / label2id.
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every key in ``config_common_kwargs`` must be honored by __init__."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                # torch_dtype can only be compared when torch is installed
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """Run the full battery of config checks (called by the test cases)."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
# Checkpoints exercised by the tokenizer-equivalence tests below
# (referenced by name in the test class's setUp).
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
# Tiny test-only model used for the fast saved-model round-trip.
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal Keras model pairing a TF tokenizer with a BERT encoder.

        Lets the saved-model test export tokenizer + model as one artifact.
        NOTE(review): the obfuscated original never assigned ``self.tokenizer``
        or ``self.bert`` even though ``call`` reads both, and its forward method
        was not named ``call`` — restored; the test below instantiates it as
        ``ModelToSave(tokenizer=...)`` and invokes the model directly.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # tiny test-only checkpoint keeps the export test fast
            config = AutoConfig.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    """Checks that the in-graph ``TFBertTokenizer`` matches ``BertTokenizer``.

    NOTE(review): in the obfuscated original every method shared one name (so
    only the last definition survived) and setUp never stored its fixtures on
    ``self`` although later methods read ``self.tokenizers`` etc. — restored.
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # TF tokenizer must agree with the Python tokenizer on shapes and ids.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        # Passing pairs as tuples or as text/text_pair must be equivalent.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        # Tokenizer compiled with tf.function must match eager execution.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        # Export tokenizer+model, reload and compare outputs.
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 527 | 0 |
'''simple docstring'''
def method_a(boundary, steps):
    """Approximate the integral of ``f`` over ``boundary`` with the
    extended trapezoidal rule: int(f) = dx/2 * (f1 + 2f2 + ... + fn).

    NOTE(review): the obfuscated original collapsed a/b/x_i into one name and
    the caller below invokes this as ``method_a`` — restored.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    # endpoints contribute with weight h/2, interior points with weight h
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    """Yield the interior sample points of (a, b) at spacing ``h``.

    NOTE(review): the obfuscated signature repeated one parameter name three
    times (a SyntaxError); method_a calls this as ``make_points(a, b, h)``.
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    """Integrand used by the demo: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y
def main():
    """Integrate f over [0, 1] with 10 trapezoids and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture model used by the tokenizer tests below.
__a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( UpperCamelCase , unittest.TestCase ):
    """Tokenizer test suite for XGLM (slow + fast SentencePiece tokenizers).

    NOTE(review): the obfuscated original gave all four class attributes and
    every method the same name, so only the last of each survived; later code
    reads ``self.test_rust_tokenizer`` and ``self.big_tokenizer`` by their real
    names — restored accordingly.
    """

    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.
        # `__a` would be name-mangled inside the class body, so pull it from
        # the module globals instead.
        sample_vocab = globals()["__a"]
        tokenizer = XGLMTokenizer(sample_vocab, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<pad>` maps to id 1 and back."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_0_0_8)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_8)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(globals()["__a"], keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # digits / accents outside the fixture vocab come back as <unk>
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        # A tokenizer built from a copied temp file must survive pickling.
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(globals()["__a"], f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ : Dict = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ['ConvNextFeatureExtractor']
a__ : int = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 51 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class lowerCAmelCase__ :
        """Fallback stub defined when vision dependencies are unavailable.

        NOTE(review): presumably stands in for the PIL ``Image`` helpers so the
        module can still be imported — confirm against the
        ``is_vision_available()`` branch above.
        """

        @staticmethod
        def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
            # Deliberate no-op: accepts any call signature and does nothing.
            pass
def hashimage(image) -> str:
    """Return the first 10 hex characters of the MD5 of the image's raw bytes.

    `image` is anything exposing ``tobytes()`` (PIL image, numpy array).
    NOTE(review): original called the nonexistent ``hashlib.mda`` — fixed to
    ``hashlib.md5``; the helper below calls this as ``hashimage``.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(image) -> dict:
    """Summarize a mask image as a short content hash plus its array shape.

    Used by the slow pipeline tests below to compare large masks compactly.
    """
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    """Tests for the SAM mask-generation pipeline.

    NOTE(review): the obfuscated original repeated parameter names (a
    SyntaxError) and gave every method one shared name — restored.
    """

    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        # Build the pipeline plus sample inputs for the common pipeline tests.
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Batched/common behavior is exercised by the slow tests below.
        pass

    @require_tf
    @unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge')
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256)
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs['masks']):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_871}
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation', model=model_id)
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs['masks']):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_210},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
            ],
        )
| 51 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Folder holding the YOLO-format ``*.txt`` label files.
LABEL_DIR = ""
# Folder holding the matching ``*.jpg`` images.
IMAGE_DIR = ""
# Folder the flipped images/labels are written into (referenced below).
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every dataset image (and its YOLO annotations) and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Collect image paths and YOLO boxes from a labels folder.

    Each ``*.txt`` file in ``label_dir`` holds one box per line
    (``class x_center y_center width height``); the matching image is
    ``<name>.jpg`` inside ``img_dir``. Files with no boxes are skipped.
    Returns ``(img_paths, labels)`` as parallel lists.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip each image and mirror its YOLO boxes accordingly.

    ``flip_type`` follows OpenCV: 1 = horizontal (mirror x_center),
    0 = vertical (mirror y_center). Returns
    ``(new_imgs_list, new_annos_lists, path_list)``.
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # horizontal flip mirrors the normalized x-center
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # vertical flip mirrors the normalized y-center
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def snake_case_ ( number_char = 32 ):
    """Return a random string of *number_char* lowercase letters and digits.

    Raises:
        ValueError: if ``number_char`` is not greater than 1 (the original
        used ``assert``, which is stripped under ``python -O``).
    """
    if number_char <= 1:
        raise ValueError("The number of character should greater than 1" )
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
# Script entry point: run the augmentation pipeline only when executed
# directly, never on import.  ``main`` is defined earlier in this file.
if __name__ == "__main__":
    main()
    print("""DONE ✅""")
| 700 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
    """Step *scheduler* ``num_steps`` times, recording the first-group LR
    before each step.

    NOTE(review): the original def repeated one placeholder parameter name
    (a SyntaxError); the name ``unwrap_schedule`` is what the test class
    below actually calls.

    Returns:
        list[float]: the learning rate observed at each step.
    """
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
    """Like ``unwrap_schedule`` but round-trips the scheduler state through
    ``torch.save``/``torch.load`` halfway through, to verify that a schedule
    survives checkpointing.

    NOTE(review): the original def repeated one placeholder parameter name
    (a SyntaxError); the name is what the test class below actually calls.

    Returns:
        list[float]: the learning rate observed at each step.
    """
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks that AdamW / Adafactor can optimise a tiny least-squares problem.

    NOTE(review): an automated renaming pass damaged this class -- the first
    method repeats one parameter name (``lowerCamelCase``, a SyntaxError),
    and every assignment target became ``_snake_case`` so later references
    (``w``, ``criterion``, ``loss``, ``optimizer``) are undefined.  Code left
    byte-identical; only documentation added.
    """

    # Presumably assertListAlmostEqual(self, list1, list2, tol) -- element-wise
    # almost-equal comparison of two equal-length lists.  TODO confirm.
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
        self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
        for a, b in zip(lowerCamelCase , lowerCamelCase ):
            self.assertAlmostEqual(lowerCamelCase , lowerCamelCase , delta=lowerCamelCase )

    # AdamW: 100 steps of MSE descent should drive w onto the target vector.
    def UpperCamelCase( self ):
        _snake_case = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase )
        _snake_case = torch.tensor([0.4, 0.2, -0.5] )
        _snake_case = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        _snake_case = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            _snake_case = criterion(lowerCamelCase , lowerCamelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )

    # Adafactor: same problem, 1 000 steps with fixed (non-relative) LR.
    def UpperCamelCase( self ):
        _snake_case = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase )
        _snake_case = torch.tensor([0.4, 0.2, -0.5] )
        _snake_case = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        _snake_case = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCamelCase , weight_decay=0.0 , relative_step=lowerCamelCase , scale_parameter=lowerCamelCase , warmup_init=lowerCamelCase , )
        for _ in range(1_000 ):
            _snake_case = criterion(lowerCamelCase , lowerCamelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks every LR-schedule factory against hand-computed expected LRs and
    verifies each schedule survives a save/reload round trip.

    NOTE(review): the renaming pass broke this class -- the helper method
    repeats a parameter name (SyntaxError), class attributes that were
    presumably ``m`` / ``optimizer`` / ``num_steps`` all became
    ``UpperCAmelCase__`` (so only the last survives), and locals
    (``common_kwargs``, ``scheds``, ``scheduler``, ``lrs`` ...) were collapsed
    into ``_snake_case``.  Code left byte-identical; only documentation added.
    """
    UpperCAmelCase__ : Any = nn.Linear(50 , 50 ) if is_torch_available() else None
    UpperCAmelCase__ : str = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    UpperCAmelCase__ : Optional[Any] = 10

    # Presumably assertListAlmostEqual(self, list1, list2, tol, msg=None).
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
        self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
        for a, b in zip(lowerCamelCase , lowerCamelCase ):
            self.assertAlmostEqual(lowerCamelCase , lowerCamelCase , delta=lowerCamelCase , msg=lowerCamelCase )

    def UpperCamelCase( self ):
        _snake_case = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        _snake_case = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            _snake_case , _snake_case = data
            _snake_case = scheduler_func(self.optimizer , **lowerCamelCase )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            _snake_case = unwrap_schedule(lowerCamelCase , self.num_steps )
            self.assertListAlmostEqual(
                lowerCamelCase , lowerCamelCase , tol=1e-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            _snake_case = scheduler_func(self.optimizer , **lowerCamelCase )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(lowerCamelCase ) # wrap to test picklability of the schedule
            _snake_case = unwrap_and_save_reload_schedule(lowerCamelCase , self.num_steps )
            self.assertListEqual(lowerCamelCase , lowerCamelCase , msg=F'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    """Picklable callable wrapper around a schedule lambda.

    ``LambdaLR`` schedules hold raw lambdas, which cannot be pickled by
    ``torch.save``.  ``wrap_scheduler`` (called by the test class above as
    ``LambdaScheduleWrapper.wrap_scheduler`` -- which grounded the restored
    class/method names) replaces every ``lr_lambdas`` entry with a wrapped,
    picklable instance that forwards calls to the original function.
    """

    def __init__( self , fn ):
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        # Map every lr lambda through the wrapper class itself.
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 368 | 0 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCAmelCase_ : Any = datasets.logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
lowerCAmelCase_ : Tuple = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
lowerCAmelCase_ : int = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
lowerCAmelCase_ : Tuple = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    """BLEURT metric: scores candidate sentences against references with a
    learnt BLEURT checkpoint.

    NOTE(review): the renaming pass damaged this class -- the variable that
    was presumably ``checkpoint_name`` became a throwaway ``_UpperCAmelCase``
    local (so ``CHECKPOINT_URLS[checkpoint_name]`` below is undefined), the
    downloaded scorer is never stored on ``self`` in the first method, and
    the last method repeats a parameter name (a SyntaxError).  Code left
    byte-identical; only documentation added.
    """

    # Declare the metric's signature: parallel lists of prediction/reference strings.
    def snake_case_ (self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )

    # Download-and-setup hook: resolve the config name to a checkpoint URL and
    # build the BleurtScorer.  `dl_manager` is the (renamed) parameter.
    def snake_case_ (self , lowerCAmelCase__ ):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            _UpperCAmelCase : str = """bleurt-base-128"""
        if self.config_name.lower() in CHECKPOINT_URLS:
            _UpperCAmelCase : List[str] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            _UpperCAmelCase : List[str] = self.config_name.upper()
        else:
            raise KeyError(
                F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )

        # download the model checkpoint specified by self.config_name and set up the scorer
        _UpperCAmelCase : int = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        _UpperCAmelCase : Tuple = score.BleurtScorer(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) )

    # Compute hook: score candidates against references; returns {"scores": [...]}.
    def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
        _UpperCAmelCase : List[Any] = self.scorer.score(references=lowerCAmelCase__ , candidates=lowerCAmelCase__ )
        return {"scores": scores}
| 414 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( __a ):
    """Configuration class for VisualBERT models.

    NOTE(review): parameter names reconstructed from the attribute
    assignments in the original body -- the obfuscated signature repeated one
    placeholder name eighteen times, which is a SyntaxError, and every
    assignment referenced the (destroyed) real name.  Defaults preserved.
    """

    # Model-type identifier used by the Auto* machinery.
    model_type = """visual_bert"""

    def __init__(
        self ,
        vocab_size=30_522 ,
        hidden_size=768 ,
        visual_embedding_dim=512 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3_072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        bypass_transformer=False ,
        special_visual_initialize=True ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 414 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Model tester for TF Blenderbot-small: builds tiny configs/inputs and
    checks decoder caching (past_key_values) against a no-cache forward pass.

    NOTE(review): the renaming pass damaged this class -- ``__init__``
    repeats the parameter name ``UpperCAmelCase__`` sixteen times (a
    SyntaxError), every ``self.<attr>`` assignment became a throwaway
    ``lowercase`` local (so ``self.batch_size`` etc. are never set), the
    three class attributes all share one name so only the last survives,
    and ``tf.inta`` was presumably ``tf.int8``.  Code left byte-identical;
    only documentation added.
    """
    lowerCamelCase_ = BlenderbotSmallConfig
    lowerCamelCase_ = {}
    lowerCamelCase_ = 'gelu'

    def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[int]=20 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Tuple=0 , ):
        """Store the tiny-model hyper-parameters (batch size, seq length, etc.)."""
        lowercase : int =parent
        lowercase : List[str] =batch_size
        lowercase : Optional[int] =seq_length
        lowercase : Tuple =is_training
        lowercase : List[str] =use_labels
        lowercase : List[Any] =vocab_size
        lowercase : Optional[Any] =hidden_size
        lowercase : Optional[int] =num_hidden_layers
        lowercase : Optional[int] =num_attention_heads
        lowercase : Dict =intermediate_size
        lowercase : List[str] =hidden_dropout_prob
        lowercase : List[Any] =attention_probs_dropout_prob
        lowercase : Optional[int] =max_position_embeddings
        lowercase : List[Any] =eos_token_id
        lowercase : Union[str, Any] =pad_token_id
        lowercase : Tuple =bos_token_id

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Build a random (config, inputs_dict) pair for the tiny model."""
        lowercase : Dict =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Force an EOS token at the end of every sequence.
        lowercase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowercase : Optional[int] =tf.concat([input_ids, eos_tensor] , axis=1 )
        lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : Union[str, Any] =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowercase : Dict =prepare_blenderbot_small_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, inputs_dict

    def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
        """Assert cached (past_key_values) decoding matches full re-decoding."""
        lowercase : List[Any] =TFBlenderbotSmallModel(config=UpperCAmelCase__ ).get_decoder()
        lowercase : str =inputs_dict['''input_ids''']

        lowercase : Union[str, Any] =input_ids[:1, :]
        lowercase : List[str] =inputs_dict['''attention_mask'''][:1, :]
        lowercase : Union[str, Any] =inputs_dict['''head_mask''']
        lowercase : int =1

        # first forward pass
        lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )

        lowercase : Optional[int] =outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        lowercase : int =ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase : int =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        lowercase : int =tf.concat([input_ids, next_tokens] , axis=-1 )
        lowercase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        lowercase : Optional[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
        lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        lowercase : Any =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        lowercase : Union[str, Any] =output_from_no_past[:, -3:, random_slice_idx]
        lowercase : List[str] =output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 )
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Optional[int]=None , __magic_name__ : Tuple=None , __magic_name__ : int=None , __magic_name__ : Any=None , __magic_name__ : List[Any]=None , ) -> Optional[Any]:
if attention_mask is None:
lowercase : Optional[Any] =tf.cast(tf.math.not_equal(__magic_name__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase : Any =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase : Dict =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase : Dict =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common-model and pipeline test suite for TF Blenderbot-small.

    NOTE(review): the renaming pass damaged this class -- the two mixin base
    classes were both renamed ``lowercase__`` (undefined here), all six class
    attributes share the name ``lowerCamelCase_`` (only the last survives,
    and the test methods below then shadow even that), and setUp results go
    to throwaway ``lowercase`` locals instead of ``self``.  Code left
    byte-identical; only documentation added.
    """
    lowerCamelCase_ = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    lowerCamelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    lowerCamelCase_ = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase_ = True
    lowerCamelCase_ = False
    lowerCamelCase_ = False

    # setUp: build the model tester and config tester used by the cases below.
    def lowerCamelCase_ ( self : List[Any] ):
        lowercase : Tuple =TFBlenderbotSmallModelTester(self )
        lowercase : List[Any] =ConfigTester(self , config_class=UpperCAmelCase__ )

    # Run the shared config sanity checks.
    def lowerCamelCase_ ( self : Any ):
        self.config_tester.run_common_tests()

    # Exercise decoder past-key-values caching on a tiny model.
    def lowerCamelCase_ ( self : Dict ):
        lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: generate a reply with the 90M Blenderbot-small
    checkpoint and check it against known-good outputs.

    NOTE(review): both class attributes were renamed to ``lowerCamelCase_``
    (the methods below expect ``src_text`` / ``model_name``), the cached
    properties shadow each other and their locals (``model``,
    ``model_inputs``, ``generated_ids``, ``generated_words``) were collapsed
    into throwaway ``lowercase`` names.  Code left byte-identical; only
    documentation added.
    """
    lowerCamelCase_ = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    lowerCamelCase_ = 'facebook/blenderbot_small-90M'

    @cached_property
    def lowerCamelCase_ ( self : Dict ):
        # Tokenizer is downloaded from the (equivalent) blenderbot-90M repo.
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )

    @cached_property
    def lowerCamelCase_ ( self : Optional[Any] ):
        lowercase : Optional[Any] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def lowerCamelCase_ ( self : List[Any] ):
        lowercase : int =self.tokenizer(self.src_text , return_tensors='''tf''' )
        lowercase : Optional[int] =self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCAmelCase__ , )
        lowercase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCAmelCase__ )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 707 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> None:
    """Display an image with matplotlib, hiding both axes.

    Fixes from the renaming pass: the imshow handle is kept in ``fig`` (it
    was assigned to a dead local, making the next lines NameErrors), and the
    axes are hidden with ``False`` -- the original passed the image itself to
    ``set_visible``, which would have left the axes visible.
    """
    fig = plt.imshow(__magic_name__ )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Output container for the Roberta-series transformation model.

    NOTE(review): the four fields were all renamed to ``lowerCAmelCase_`` by
    an automated pass, so the later duplicates shadow the earlier ones; the
    instantiation sites below use the keywords ``projection_state``,
    ``last_hidden_state``, ``hidden_states`` and ``attentions``, which were
    presumably the original field names.  Also note these lines carry no type
    annotations, so ``@dataclass`` does not treat them as fields at all.
    Code left byte-identical; only documentation added.
    """

    lowerCAmelCase_ = None
    lowerCAmelCase_ = None
    lowerCAmelCase_ = None
    lowerCAmelCase_ = None
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Configuration for the Roberta-series text encoder with an extra
    projection head.

    NOTE(review): parameter names reconstructed from the attribute
    assignments and the ``super().__init__`` keywords in the original body --
    the obfuscated signature repeated one placeholder name (a SyntaxError)
    and the assignments all targeted a throwaway local.  Defaults preserved.
    """

    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Output dimension of the projection head.
        self.project_dim = project_dim
        # Pooling strategy identifier (e.g. "cls").
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class UpperCAmelCase ( UpperCAmelCase__ ):
    """XLM-Roberta encoder with a linear transformation head projecting the
    hidden states to ``config.project_dim``.

    NOTE(review): the renaming pass damaged this class -- the four class
    attributes all share the name ``lowerCAmelCase_`` (only the last
    survives; they were presumably the keys-to-ignore lists,
    ``base_model_prefix`` and ``config_class``), every ``self.<attr>``
    assignment in ``__init__`` became a throwaway ``snake_case_`` local (so
    ``self.base_model`` / ``self.has_pre_transformation`` etc. are never
    set), and the forward method repeats its placeholder parameter name
    eleven times, which is a SyntaxError.  Code left byte-identical; only
    documentation added.
    """
    lowerCAmelCase_ = [r'''pooler''', r'''logit_scale''']
    lowerCAmelCase_ = [r'''position_ids''', r'''predictions.decoder.bias''']
    lowerCAmelCase_ = '''roberta'''
    lowerCAmelCase_ = RobertaSeriesConfig

    def __init__( self , lowercase : Dict ):
        """Build the XLM-Roberta base model plus the projection layer(s)."""
        super().__init__(lowercase )
        snake_case_ = XLMRobertaModel(lowercase )
        snake_case_ = nn.Linear(config.hidden_size , config.project_dim )
        snake_case_ = getattr(lowercase , "has_pre_transformation" , lowercase )
        if self.has_pre_transformation:
            # Optional pre-transformation head applied to the penultimate layer.
            snake_case_ = nn.Linear(config.hidden_size , config.project_dim )
            snake_case_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def snake_case__ ( self : Any , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None , ):
        """Forward pass: run the base model, then project either the
        penultimate hidden state (pre-transformation path) or the last
        hidden state into the transformation output."""
        snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict

        snake_case_ = self.base_model(
            input_ids=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_attentions=__lowercase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__lowercase , )

        if self.has_pre_transformation:
            # Project the second-to-last layer after LayerNorm.
            snake_case_ = outputs["hidden_states"][-2]
            snake_case_ = self.pre_LN(__lowercase )
            snake_case_ = self.transformation_pre(__lowercase )
            return TransformationModelOutput(
                projection_state=__lowercase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            snake_case_ = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=__lowercase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 376 |
from binascii import hexlify
# The rest of this module calls the hash constructor ``shaaaa``; bind it to the
# real hashlib SHA-256 (a plain ``from hashlib import shaaaa`` raises
# ImportError because hashlib exposes no such name).
from hashlib import sha256 as shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowercase__ : Dict = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class UpperCAmelCase :
    """Diffie-Hellman key exchange over a MODP group from the module-level ``primes`` table.

    The private key is 32 random bytes; public keys travel as hex strings and
    are validated with the safe-prime check ``key^((p-1)/2) == 1 (mod p)``
    before the shared secret is derived.

    Fixes vs. previous revision: ``__init__`` referenced an undefined ``group``
    and never assigned ``self.prime``/``self.generator``; every method shared
    the same name (so only the last survived); the static path called an
    undefined ``DiffieHellman`` class.
    """

    def __init__(self, __lowercase: int = 14):
        """Select the MODP group (default 14, the 2048-bit group).

        Raises:
            ValueError: if the group id is not in ``primes``.
        """
        if __lowercase not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[__lowercase]["prime"]
        self.generator = primes[__lowercase]["generator"]
        # 256-bit random private exponent.
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Return the private key as a hex string (no '0x' prefix)."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Return g^x mod p as a hex string (no '0x' prefix)."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        """Check 2 <= key <= p-2 and key^((p-1)/2) == 1 (mod p)."""
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """Derive the shared secret from the peer's hex public key.

        Raises:
            ValueError: if the peer key fails validation.
        """
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return shaaaa(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        """Static variant of the public-key validity check."""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, __lowercase: int = 14) -> str:
        """Derive the shared secret from two hex key strings without an instance."""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[__lowercase]["prime"]
        if not UpperCAmelCase.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return shaaaa(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 376 | 1 |
def UpperCAmelCase__(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    Uses the factorial number system: each factorial-base digit of ``k``
    selects which of the remaining elements comes next.

    Fixes vs. previous revision: both parameters were named identically
    (a SyntaxError) while the body read ``k`` and ``n``; n == 1 crashed
    with an IndexError on the final append.

    Args:
        k: permutation index, 0 <= k < n!.
        n: number of elements to permute (n >= 1).

    Raises:
        AssertionError: if ``k`` is out of bounds.

    >>> UpperCAmelCase__(0, 3)
    [0, 1, 2]
    >>> UpperCAmelCase__(5, 3)
    [2, 1, 0]
    """
    if n == 1:
        # Single element: only the identity permutation exists.
        assert k == 0, "k out of bounds"
        return [0]
    # Factorials from 1! up to (n-1)!.
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Peel off one factorial-base digit of k per remaining position.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 577 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments below bind the SAME name, so the logger
# created here is immediately shadowed by the archive map. These were
# presumably two distinct names originally (e.g. `logger` and
# `XLM_PRETRAINED_CONFIG_ARCHIVE_MAP`) — confirm before relying on either.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# Canonical XLM checkpoint names mapped to their hosted config.json URLs.
SCREAMING_SNAKE_CASE__ = {
    '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
    '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
    '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
    '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
    '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
    '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
    '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
    '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
    '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
    '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class _UpperCamelCase( PretrainedConfig ):
    """Configuration for XLM models.

    Fixes vs. previous revision: the base class ``__lowerCamelCase`` was an
    undefined name (PretrainedConfig is the imported base), every ``__init__``
    parameter shared one name (a SyntaxError), the hyper-parameters were
    assigned to a throwaway local instead of ``self``, and the two class
    attributes shadowed each other (``model_type`` / ``attribute_map``).
    Unknown ``kwargs`` are forwarded to ``PretrainedConfig``.
    """

    model_type = '''xlm'''
    # Legacy generic attribute names mapped onto the XLM-specific ones.
    attribute_map = {
        '''hidden_size''': '''emb_dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
        '''n_words''': '''vocab_size''',  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=3_0_1_4_5,
        emb_dim=2_0_4_8,
        n_layers=1_2,
        n_heads=1_6,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=5_1_2,
        embed_init_std=2_0_4_8**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store all hyper-parameters on self; forward the rest to the base config."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            # Honour the deprecated alias without dropping it on the floor.
            self.n_words = kwargs['n_words']

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class _UpperCamelCase( OnnxConfig ):
    """ONNX export configuration for XLM models.

    Fixes vs. previous revision: the base class ``__lowerCamelCase`` was an
    undefined name (OnnxConfig is the imported base) and the property carried
    a name-mangled identifier the export machinery could never find; the
    exporter looks up the ``inputs`` property.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the exported model's input tensors."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 577 | 1 |
def A__(SCREAMING_SNAKE_CASE_):
    """Return True if the linked list starting at the given head node is a palindrome.

    O(n) time, O(1) extra space: finds the midpoint with fast/slow pointers,
    reverses the second half in place, then walks both halves in lockstep.
    NOTE: destructive — the list is left with its second half reversed.

    Fixes vs. previous revision: every binding went to a throwaway local while
    the body read the never-defined names ``fast``/``slow``/``second``/``node``.
    """
    head = SCREAMING_SNAKE_CASE_
    if not head:
        return True
    # Split the list into two halves; `slow` stops at the midpoint.
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # Reverse the second half.
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # Compare the two halves.
    # The second part has the same number of nodes, or one less.
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def A__(SCREAMING_SNAKE_CASE_):
    """Palindrome check using an explicit stack of the second half's values.

    O(n) time, O(n) extra space; non-destructive.

    Fixes vs. previous revision: ``fast``/``slow``/``cur``/``stack`` were read
    but never bound (all assignments went to one throwaway local).
    """
    head = SCREAMING_SNAKE_CASE_
    if not head or not head.next:
        return True

    # 1. Advance `slow` to the midpoint with a fast/slow pointer pair.
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the values of the second half onto a stack.
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Pop while walking from the head: any mismatch means not a palindrome.
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def A__(SCREAMING_SNAKE_CASE_):
    """Palindrome check via a value -> [positions] index.

    A list of length L is a palindrome iff the positions of each value pair up
    as (i, L-1-i), and at most one value occurs an odd number of times (the
    middle element). O(n) time and space; non-destructive.

    Fixes vs. previous revision: ``d``/``pos``/``checksum``/``middle``/``step``
    were read but never bound (all assignments went to one throwaway local).
    """
    head = SCREAMING_SNAKE_CASE_
    if not head or not head.next:
        return True

    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1

    checksum = pos - 1  # each mirrored pair of positions must sum to this
    middle = 0
    for positions in d.values():
        if len(positions) % 2 != 0:
            # Odd count: candidate for the single middle element.
            middle += 1
        else:
            step = 0
            for i in range(0, len(positions)):
                if positions[i] + positions[len(positions) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
return True | 32 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
# BibTeX citation shown by `datasets` for the BLEU metric. The three constants
# below were all bound to one name, leaving `_DESCRIPTION`/`_KWARGS_DESCRIPTION`
# (referenced by the metric class decorator) undefined — restored here.
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
    author = \"Lin, Chin-Yew  and
      Och, Franz Josef\",
    booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
    month = \"aug 23{--}aug 27\",
    year = \"2004\",
    address = \"Geneva, Switzerland\",
    publisher = \"COLING\",
    url = \"https://www.aclweb.org/anthology/C04-1072\",
    pages = \"501--507\",
}
"""

# Long-form description rendered in the metric card.
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

# Usage/arguments docstring injected onto the metric's `compute` method.
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],                             # tokenized prediction of the first sample
    ...     [\"foo\", \"bar\", \"foobar\"]                                             # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)
    ...     [[\"foo\", \"bar\", \"foobar\"]]                                           # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric(\"bleu\")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results[\"bleu\"])
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """BLEU metric wrapper around the TensorFlow NMT reference implementation.

    Fixes vs. previous revision: both methods shared one name (so only the
    second survived, and neither matched the ``_info``/``_compute`` hooks
    ``datasets.Metric`` dispatches to), and the compute method's parameters
    all shared one name (a SyntaxError).
    """

    def _info(self):
        """Declare metric metadata and the expected input feature types."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Compute corpus BLEU for tokenized predictions against tokenized references."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 145 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _lowerCAmelCase ( SchedulerCommonTest ):
    """Tests for DPMSolverSDEScheduler: config sweeps plus full denoising loops
    with device-dependent numeric expectations.

    Fixes vs. previous revision: the base class ``UpperCamelCase_`` was an
    undefined name (SchedulerCommonTest is the imported base); both class
    attributes and all methods shared single names and shadowed each other;
    method bodies read ``config``/``scheduler``/``sample`` etc. that were
    never bound.
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; any entry may be overridden via kwargs."""
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Expected values differ per backend because of kernel selection.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Place the timestep schedule directly on the target device.
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 717 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make kernel/algorithm selection deterministic so image outputs are reproducible.
enable_full_determinism()
class _lowerCAmelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (tiny-model) tests for StableUnCLIPPipeline.

    Fixes vs. previous revision: the three mixin bases were the undefined
    name ``__a`` (the imported tester mixins are the intended bases), the
    class attributes all shared one name (shadowing each other), and the
    component-building method assigned every component to a throwaway local
    while reading never-bound names like ``embedder_hidden_size``.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build the full set of tiny pipeline components (seeded for determinism)."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-0_5,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-0_5,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # CLIP image embedding and the noise-level embedding get concatenated.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard prompt/generator inputs; mps needs a global-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Only compare exact outputs on CPU, where results are deterministic.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """GPU integration tests for the full StableUnCLIP pipeline.

    Fixes vs. previous revision: both test methods shared one name (the
    second shadowed the first, and neither ran under unittest's ``test_*``
    discovery), ``torch.floataa`` is not a real dtype (float16 intended),
    and locals like ``pipe``/``generator`` were read but never bound.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        # NOTE: "anime turle" (sic) matches the prompt the reference image was
        # generated with — do not "fix" the typo.
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 279 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages while converting checkpoints.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
a__ : str = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig:
    """Build a YolosConfig matching the named YOLOS checkpoint variant.

    Fixes vs. previous revision: the body read ``yolos_name``/``config`` which
    were never bound (every assignment went to one throwaway local), so none
    of the architecture fields were actually set on the config.

    Args:
        SCREAMING_SNAKE_CASE_: checkpoint variant name, e.g. "yolos_ti",
            "yolos_s_dWr", "yolos_s_200_pre", "yolos_b".
    """
    yolos_name = SCREAMING_SNAKE_CASE_
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    # COCO detection label space.
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def __snake_case ( state_dict : dict , config : YolosConfig , base_model : bool = False ) -> None:
    """Split each fused timm qkv projection into separate q/k/v entries, in place.

    Fixes vs. previous revision: all three parameters shared one name (a
    SyntaxError) and the destination dict keys were lost — every split tensor
    was assigned to a throwaway local instead of back into ``state_dict``.

    Args:
        state_dict: checkpoint state dict to rewrite (mutated in place).
        config: model config providing ``num_hidden_layers`` / ``hidden_size``.
        base_model: unused here; kept for signature parity with the other
            ViT-family conversion scripts.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Translate a timm YOLOS parameter name into its HF Transformers equivalent.

    Fixes vs. previous revision: the body read and returned ``name``, which was
    never bound (all replacements were assigned to a throwaway local).
    """
    name = SCREAMING_SNAKE_CASE_
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    # "attn.proj" must be handled before the bare "attn" rule below.
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def __snake_case ( orig_state_dict : dict , model ) -> dict:
    """Rename every checkpoint key to HF conventions, splitting fused qkv tensors.

    Fixes vs. previous revision: the two parameters shared one name (a
    SyntaxError) and the destination dict keys were lost (all assignments
    went to a throwaway local), so the dict was emptied instead of rewritten.

    NOTE(review): the non-qkv branch needs the key-renaming helper defined
    earlier in this file, expected here under the name ``rename_key`` —
    confirm the helper is exposed under that name.

    Args:
        orig_state_dict: raw timm checkpoint state dict (rewritten in place).
        model: target YolosForObjectDetection, used for per-layer head sizes.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def __snake_case ( ) -> Image.Image:
    """Download the standard COCO cats test image used to verify conversions.

    Fixes vs. previous revision: the body referenced ``SCREAMING_SNAKE_CASE_``
    although this function takes no parameters (the URL local was discarded),
    and the return annotation claimed a tensor while a PIL image is returned.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Convert an original YOLOS checkpoint to the HF format and verify its outputs.

    Args:
        yolos_name: which YOLOS variant the checkpoint is (e.g. "yolos_s_200_pre").
        checkpoint_path: path to the original state dict (``.pth`` file).
        pytorch_dump_folder_path: directory to save the converted model and
            image processor to.
        push_to_hub: if True, also push the converted artifacts to the
            ``hustvl`` organization on the hub.

    Raises:
        ValueError: if ``yolos_name`` is not one of the known variants.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    # Per-variant reference slices; a mismatch means the conversion is wrong.
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Map the internal variant name to its hub repository name.
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--yolos_name',
        default='yolos_s_200_pre',
        type=str,
        help=(
            'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
            ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
        ),
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
    """Tokenizer tests for XGLM (slow and fast) against a SentencePiece fixture.

    NOTE(review): identifiers in this block were machine-mangled.  The base
    ``UpperCAmelCase_`` is presumably ``TokenizerTesterMixin`` (imported above),
    ``a__`` the fixture-path constant, and the repeated ``UpperCAmelCase``
    assignments originally bound distinct locals — confirm against upstream
    before executing.
    """

    # Presumably tokenizer_class / rust_tokenizer_class / test flags; the
    # mangling collapsed all four attribute names onto one.
    _lowerCamelCase =XGLMTokenizer
    _lowerCamelCase =XGLMTokenizerFast
    _lowerCamelCase =True
    _lowerCamelCase =True

    # setUp: save a fixture-backed tokenizer into the mixin's temp dir.
    def __snake_case ( self : Optional[int] ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
        tokenizer.save_pretrained(self.tmpdirname )

    # Checks <pad> converts to id 1 and back.
    def __snake_case ( self : List[Any] ):
        UpperCAmelCase = '''<pad>'''
        UpperCAmelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )

    # Checks the first vocab entries and total vocab size of the fixture.
    def __snake_case ( self : Tuple ):
        UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(len(a__ ) , 1008 )

    def __snake_case ( self : List[Any] ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )

    # Full tokenization round-trip: tokenize, ids, and back to tokens.
    def __snake_case ( self : Optional[Any] ):
        UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
        UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            a__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
        self.assertListEqual(
            a__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        # Out-of-vocab pieces come back as <unk> on the reverse mapping.
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
        self.assertListEqual(
            a__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    # The real pretrained tokenizer, cached; used by the @slow tests below.
    @cached_property
    def __snake_case ( self : Optional[Any] ):
        return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )

    # Pickling a tokenizer built from a temp file must not keep the file open.
    def __snake_case ( self : Optional[int] ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(a__ , f.name )
            UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
            UpperCAmelCase = pickle.dumps(a__ )
        pickle.loads(a__ )

    # Slow and fast tokenizers must agree on tokens and encodings.
    def __snake_case ( self : Tuple ):
        if not self.test_rust_tokenizer:
            return
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
        UpperCAmelCase = tokenizer.tokenize(a__ )
        UpperCAmelCase = rust_tokenizer.tokenize(a__ )
        self.assertListEqual(a__ , a__ )
        UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
        UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
        self.assertListEqual(a__ , a__ )
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = tokenizer.encode(a__ )
        UpperCAmelCase = rust_tokenizer.encode(a__ )
        self.assertListEqual(a__ , a__ )

    @slow
    def __snake_case ( self : int ):
        UpperCAmelCase = '''Hello World!'''
        UpperCAmelCase = [2, 31227, 4447, 35]
        self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )

    @slow
    def __snake_case ( self : List[str] ):
        UpperCAmelCase = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
        )
        # fmt: off
        UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )

    # End-to-end integration expectation against the published checkpoint.
    @slow
    def __snake_case ( self : Any ):
        # fmt: off
        UpperCAmelCase = {
            '''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        } # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
| 51 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of released VisualBERT checkpoints to their hosted
# config files.  NOTE(review): both assignments target the same mangled name
# ``__SCREAMING_SNAKE_CASE`` — the second (the dict, originally the pretrained
# config archive map) clobbers the logger; confirm the intended names upstream.
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
    """uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
    """uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
    """uclanlp/visualbert-vqa-coco-pre""": (
        """https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
    ),
    """uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
    """uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
    """uclanlp/visualbert-vcr-coco-pre""": (
        """https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
    ),
    """uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
    """uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
    """uclanlp/visualbert-nlvr2-coco-pre""": (
        """https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __magic_name__(PretrainedConfig):
    """Configuration for VisualBERT models (``model_type="visual_bert"``).

    NOTE(review): the class name was machine-mangled — this is presumably
    ``VisualBertConfig`` (see the checkpoint map above); the base class is
    restored to ``PretrainedConfig``, which is imported at the top of this
    module.  Parameter names are recovered from the attribute assignments
    in the original body.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; extra kwargs are forwarded to ``PretrainedConfig``."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 89 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Builds the kwargs dict and dimensions shared by the Donut image-processor tests.

    The original block had every parameter mangled to the same name (a
    SyntaxError); names are recovered from the right-hand sides of the
    attribute assignments and from the sibling test class, which instantiates
    this as ``DonutImageProcessingTester`` and calls
    ``prepare_image_processor_dict``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 — mutable default kept; it is never mutated here
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size expressed in the explicit {"height", "width"} form.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a ``DonutImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase):
    """Shape and property tests for ``DonutImageProcessor`` over PIL, numpy and torch inputs.

    NOTE(review): names are machine-mangled — the base ``__UpperCAmelCase`` is
    presumably ``ImageProcessingSavingTestMixin`` (imported above), and the
    repeated ``_A`` method names originally were distinct ``test_*``/setUp
    names; confirm against upstream before running.
    """

    # Image processor class under test (None when vision deps are missing).
    SCREAMING_SNAKE_CASE__ : Dict = DonutImageProcessor if is_vision_available() else None

    # setUp: build the shared tester helper.
    def _A ( self: Optional[int] ):
        SCREAMING_SNAKE_CASE_ = DonutImageProcessingTester(self )

    # Kwargs dict used to construct the processor in each test.
    @property
    def _A ( self: List[str] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor must expose all configuration attributes.
    def _A ( self: Union[str, Any] ):
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_thumbnail''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_align_long_axis''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_pad''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )

    # `size` accepts dict, int, and legacy (width, height) tuple forms.
    def _A ( self: List[str] ):
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        # Previous config had dimensions in (width, height) order
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )

    def _A ( self: List[Any] ):
        pass

    # Output shape check with random PIL inputs.
    @is_flaky()
    def _A ( self: int ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    # Output shape check with random numpy inputs.
    @is_flaky()
    def _A ( self: Optional[Any] ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    # Output shape check with random torch inputs.
    @is_flaky()
    def _A ( self: List[str] ):
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
| 89 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Build a ``(getitem, key)`` operation tuple."""
    return getitem, k


def _set(k, v):
    """Build a ``(setitem, key, value)`` operation tuple."""
    return setitem, k, v


def _del(k):
    """Build a ``(delitem, key)`` operation tuple."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)`` and capture the outcome.

    Returns:
        ``(result, None)`` on success, ``(None, exception)`` on failure —
        letting the caller compare HashMap and dict behavior including errors.
    """
    # The four helpers above were all mangled to the same name ``A_`` while
    # the fixtures and tests below call ``_get``/``_set``/``_del``/
    # ``_run_operation``; the original names are restored from those call sites.
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
# Operation sequences fed to the parametrized HashMap-vs-dict test below.
# Each entry is a (callable, *args) tuple produced by _get/_set/_del.
lowerCAmelCase : int = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
# Overwriting the same key must keep only the latest value.
lowerCAmelCase : Dict = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
# Interleaved set/delete cycles.
lowerCAmelCase : List[str] = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
# Accessing/deleting absent keys must raise the same errors as dict.
lowerCAmelCase : Optional[int] = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
# Enough inserts to force the HashMap to grow its bucket array.
lowerCAmelCase : Tuple = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
# Grow, then shrink by deleting everything, then insert again.
# NOTE(review): all six fixtures were mangled onto the same name
# ``lowerCAmelCase`` — the parametrize block below references them as
# _add_items/_overwrite_items/etc.; restore those names from upstream.
lowerCAmelCase : List[Any] = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
    'operations' , (
        pytest.param(_add_items , id='add items' ),
        pytest.param(_overwrite_items , id='overwrite items' ),
        pytest.param(_delete_items , id='delete items' ),
        pytest.param(_access_absent_items , id='access absent items' ),
        pytest.param(_add_with_resize_up , id='add with resize up' ),
        pytest.param(_add_with_resize_down , id='add with resize down' ),
    ) , )
def A_ ( a ):
    """Run each operation against HashMap and a plain dict and compare state.

    NOTE(review): identifiers were machine-mangled — ``_run_operation(a , a , *a )``
    cannot be the intended call (presumably ``(my, fun, *args)`` and
    ``(py, fun, *args)``), and the comparisons reference names (``my_res``,
    ``py_res``, ``my``, ``py``) that are no longer bound.  Restore from
    upstream before executing.
    """
    SCREAMING_SNAKE_CASE_ : List[str] = HashMap(initial_block_size=4 )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
    for _, (fun, *args) in enumerate(a ):
        SCREAMING_SNAKE_CASE_ : Tuple = _run_operation(a , a , *a )
        SCREAMING_SNAKE_CASE_ : Optional[int] = _run_operation(a , a , *a )
        # Results, string form, key set, length and items must all agree.
        assert my_res == py_res
        assert str(a ) == str(a )
        assert set(a ) == set(a )
        assert len(a ) == len(a )
        assert set(my.items() ) == set(py.items() )
def A_():
    """Check that ``HashMap`` exposes no public names beyond ``dict``'s.

    NOTE(review): the function name was machine-mangled (presumably a
    ``test_*`` name upstream); the inner helper and the compared set names
    are restored from the assertion on the last line.
    """

    def is_public(name: str) -> bool:
        # Public == no leading underscore.
        return not name.startswith('_')

    dict_public_names = {name for name in dir({} ) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name)}

    # HashMap must not introduce public API that a plain dict lacks.
    assert dict_public_names > hash_public_names
| 511 | '''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
    """Tokenizer tests for LayoutLM against a tiny handwritten WordPiece vocab.

    NOTE(review): names are machine-mangled — the base ``a__`` is presumably
    ``TokenizerTesterMixin`` (imported above), and the four identical
    ``SCREAMING_SNAKE_CASE`` attributes originally were
    tokenizer_class/rust_tokenizer_class/test flags; confirm before running.
    """

    SCREAMING_SNAKE_CASE = LayoutLMTokenizer
    SCREAMING_SNAKE_CASE = LayoutLMTokenizerFast
    SCREAMING_SNAKE_CASE = True
    SCREAMING_SNAKE_CASE = True

    # setUp: write a minimal vocab file (with ## continuation pieces) to tmpdir.
    def _lowerCAmelCase( self ) -> Dict:
        super().setUp()
        lowercase__ : Any = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    # Factory for a tokenizer backed by the vocab written in setUp.
    def _lowerCAmelCase( self , **__lowerCAmelCase ) -> Optional[int]:
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    # Sample (input, expected-normalized-output) pair used by the mixin.
    def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[Any]:
        lowercase__ : Union[str, Any] = '''UNwant\u00E9d,running'''
        lowercase__ : int = '''unwanted, running'''
        return input_text, output_text

    # WordPiece tokenization and id conversion against the tiny vocab.
    def _lowerCAmelCase( self ) -> int:
        lowercase__ : List[str] = self.tokenizer_class(self.vocab_file )
        lowercase__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [7, 4, 5, 10, 8, 9] )

    # Deliberately empty override — presumably disables an inherited mixin
    # test that does not apply here; confirm against upstream.
    def _lowerCAmelCase( self ) -> Union[str, Any]:
        pass
| 152 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import machinery for the XLM-RoBERTa package: build an import-structure
# mapping, populated per available backend, then expose it through
# ``_LazyModule`` (or import eagerly under TYPE_CHECKING).
# NOTE(review): every assignment target was mangled to ``a`` — originally the
# dict was ``_import_structure`` and the later list assignments extended it
# under per-module keys; as written, each assignment clobbers the previous
# one.  Restore the original targets from upstream before relying on this.
a = {
    '''configuration_xlm_roberta''': [
        '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaConfig''',
        '''XLMRobertaOnnxConfig''',
    ],
}

# sentencepiece backend: slow tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = ['''XLMRobertaTokenizer''']

# tokenizers backend: fast tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = ['''XLMRobertaTokenizerFast''']

# torch backend: PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = [
        '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaForCausalLM''',
        '''XLMRobertaForMaskedLM''',
        '''XLMRobertaForMultipleChoice''',
        '''XLMRobertaForQuestionAnswering''',
        '''XLMRobertaForSequenceClassification''',
        '''XLMRobertaForTokenClassification''',
        '''XLMRobertaModel''',
        '''XLMRobertaPreTrainedModel''',
    ]

# tensorflow backend: TF model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = [
        '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMRobertaForCausalLM''',
        '''TFXLMRobertaForMaskedLM''',
        '''TFXLMRobertaForMultipleChoice''',
        '''TFXLMRobertaForQuestionAnswering''',
        '''TFXLMRobertaForSequenceClassification''',
        '''TFXLMRobertaForTokenClassification''',
        '''TFXLMRobertaModel''',
        '''TFXLMRobertaPreTrainedModel''',
    ]

# flax backend: Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a = [
        '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxXLMRobertaForMaskedLM''',
        '''FlaxXLMRobertaForCausalLM''',
        '''FlaxXLMRobertaForMultipleChoice''',
        '''FlaxXLMRobertaForQuestionAnswering''',
        '''FlaxXLMRobertaForSequenceClassification''',
        '''FlaxXLMRobertaForTokenClassification''',
        '''FlaxXLMRobertaModel''',
        '''FlaxXLMRobertaPreTrainedModel''',
    ]

# Under type checking, import everything eagerly so static analysis sees it.
if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy loader.
    import sys

    a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
    """Processor wrapping a tokenizer plus optional speaker-embedding voice presets.

    NOTE(review): this block is machine-mangled — presumably ``BarkProcessor``
    (base ``ProcessorMixin``).  Several ``def`` lines repeat the parameter name
    ``_UpperCAmelCase``, which is a SyntaxError in Python; the original
    signatures (from_pretrained/save_pretrained/_load_voice_preset/
    _validate_voice_preset_dict/__call__) must be restored from upstream
    before this class can even be parsed.
    """

    # Presumably tokenizer_class / attributes / preset_shape of ProcessorMixin.
    UpperCAmelCase : Optional[int] = '''AutoTokenizer'''
    UpperCAmelCase : Optional[Any] = ['''tokenizer''']
    # Expected ndarray rank for each voice-preset component.
    UpperCAmelCase : List[str] = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }

    def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=None ):
        super().__init__(_UpperCAmelCase )
        # Dict of named voice presets (or None when none are preloaded).
        _A = speaker_embeddings

    # from_pretrained: load the tokenizer and, if present, the JSON index of
    # speaker-embedding files from the repo/path.
    @classmethod
    def lowerCAmelCase_ ( cls : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int="speaker_embeddings_path.json" , **_UpperCAmelCase : int ):
        if speaker_embeddings_dict_path is not None:
            _A = get_file_from_repo(
                _UpperCAmelCase , _UpperCAmelCase , subfolder=kwargs.pop('subfolder' , _UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCAmelCase ) , force_download=kwargs.pop('force_download' , _UpperCAmelCase ) , proxies=kwargs.pop('proxies' , _UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCAmelCase ) , revision=kwargs.pop('revision' , _UpperCAmelCase ) , )
            if speaker_embeddings_path is None:
                # Missing index file is non-fatal: warn and continue without presets.
                logger.warning(
                    F'''`{os.path.join(_UpperCAmelCase , _UpperCAmelCase )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                _A = None
            else:
                with open(_UpperCAmelCase ) as speaker_embeddings_json:
                    _A = json.load(_UpperCAmelCase )
        else:
            _A = None
        _A = AutoTokenizer.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
        return cls(tokenizer=_UpperCAmelCase , speaker_embeddings=_UpperCAmelCase )

    # save_pretrained: dump each preset's arrays as .npy files plus a JSON
    # index mapping preset/component to file path, then defer to the mixin.
    def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : int="speaker_embeddings_path.json" , _UpperCAmelCase : Union[str, Any]="speaker_embeddings" , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Tuple , ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(_UpperCAmelCase , _UpperCAmelCase , 'v2' ) , exist_ok=_UpperCAmelCase )
            _A = {}
            _A = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    _A = self._load_voice_preset(_UpperCAmelCase )
                    _A = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] , _UpperCAmelCase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_UpperCAmelCase , )
                        _A = os.path.join(_UpperCAmelCase , F'''{prompt_key}_{key}.npy''' )
                    _A = tmp_dict
            with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' ) as fp:
                json.dump(_UpperCAmelCase , _UpperCAmelCase )
        super().save_pretrained(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )

    # _load_voice_preset: fetch the three prompt arrays of a named preset.
    def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str = None , **_UpperCAmelCase : Optional[int] ):
        _A = self.speaker_embeddings[voice_preset]
        _A = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            _A = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , _UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCAmelCase ) , force_download=kwargs.pop('force_download' , _UpperCAmelCase ) , proxies=kwargs.pop('proxies' , _UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCAmelCase ) , revision=kwargs.pop('revision' , _UpperCAmelCase ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            _A = np.load(_UpperCAmelCase )
        return voice_preset_dict

    # _validate_voice_preset_dict: ensure each prompt exists and has the
    # ndarray rank declared in the preset-shape table above.
    def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[dict] = None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )

    # __call__: resolve the voice preset (named, .npz path, or dict), validate
    # it, tokenize the text, and attach the preset to the returned encoding.
    def __call__( self : List[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : List[str]="pt" , _UpperCAmelCase : Union[str, Any]=256 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=False , **_UpperCAmelCase : Any , ):
        if voice_preset is not None and not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            if (
                isinstance(_UpperCAmelCase , _UpperCAmelCase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                _A = self._load_voice_preset(_UpperCAmelCase )
            else:
                if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
                    _A = voice_preset + '.npz'
                _A = np.load(_UpperCAmelCase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(_UpperCAmelCase , **_UpperCAmelCase )
            _A = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
        _A = self.tokenizer(
            _UpperCAmelCase , return_tensors=_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
        if voice_preset is not None:
            _A = voice_preset
        return encoded_text
| 505 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.