| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
from collections import deque
from math import floor
from random import random
from time import time
class lowerCamelCase__:
def __init__( self: Any ):
__lowerCamelCase = {}
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=1 ):
if self.graph.get(UpperCamelCase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__lowerCamelCase = [[w, v]]
if not self.graph.get(UpperCamelCase_ ):
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Any ):
return list(self.graph )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int ):
if self.graph.get(UpperCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any]=-2 , UpperCamelCase_: Union[str, Any]=-1 ):
if s == d:
return []
__lowerCamelCase = []
__lowerCamelCase = []
if s == -2:
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return visited
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any=-1 ):
if c == -1:
__lowerCamelCase = floor(random() * 1_00_00 ) + 10
for i in range(UpperCamelCase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
__lowerCamelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase_ , UpperCamelCase_ , 1 )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Dict=-2 ):
__lowerCamelCase = deque()
__lowerCamelCase = []
if s == -2:
__lowerCamelCase = list(self.graph )[0]
d.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
while d:
__lowerCamelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any ):
return len(self.graph[u] )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any=-2 ):
__lowerCamelCase = []
__lowerCamelCase = []
if s == -2:
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = s
__lowerCamelCase = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return sorted_nodes
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = -2
__lowerCamelCase = []
__lowerCamelCase = s
__lowerCamelCase = False
__lowerCamelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase = len(UpperCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase = True
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = False
indirect_parents.append(UpperCamelCase_ )
__lowerCamelCase = s
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return list(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = -2
__lowerCamelCase = []
__lowerCamelCase = s
__lowerCamelCase = False
__lowerCamelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase = len(UpperCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase = True
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = False
indirect_parents.append(UpperCamelCase_ )
__lowerCamelCase = s
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return False
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str=-2 , UpperCamelCase_: Optional[Any]=-1 ):
__lowerCamelCase = time()
self.dfs(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = time()
return end - begin
def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[str]=-2 ):
__lowerCamelCase = time()
self.bfs(UpperCamelCase_ )
__lowerCamelCase = time()
return end - begin
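The class above stores a directed graph as a dict that maps each vertex to a list of `[weight, neighbour]` pairs, and walks it with an explicit stack (DFS, topological sort, cycle detection) or a `deque` (BFS). Because the refactored placeholders (`__lowerCamelCase`, `UpperCamelCase_`) hide the intended identifiers, here is a minimal deobfuscated sketch of the same adjacency-list pattern; the names are reconstructions, not the identifiers of this dataset row.

```python
from collections import deque


class DirectedGraph:
    def __init__(self):
        # vertex -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # add edge u -> v with weight w, skipping duplicates
        self.graph.setdefault(u, [])
        if [w, v] not in self.graph[u]:
            self.graph[u].append([w, v])
        self.graph.setdefault(v, [])

    def dfs(self, start):
        # iterative depth-first traversal from `start`
        visited, stack = [start], [start]
        while stack:
            node = stack.pop()
            for _, neighbour in self.graph.get(node, []):
                if neighbour not in visited:
                    visited.append(neighbour)
                    stack.append(neighbour)
        return visited

    def bfs(self, start):
        # breadth-first traversal using a deque, as in the class above
        visited, queue = [start], deque([start])
        while queue:
            node = queue.popleft()
            for _, neighbour in self.graph.get(node, []):
                if neighbour not in visited:
                    visited.append(neighbour)
                    queue.append(neighbour)
        return visited


if __name__ == "__main__":
    g = DirectedGraph()
    for u, v in [(1, 2), (2, 3), (1, 4), (4, 3)]:
        g.add_pair(u, v)
    print(g.dfs(1))  # [1, 2, 4, 3]
    print(g.bfs(1))  # [1, 2, 4, 3]
```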
class lowerCamelCase__:
def __init__( self: str ):
__lowerCamelCase = {}
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any]=1 ):
# check if u already exists
if self.graph.get(UpperCamelCase_ ):
# add the edge only if it is not already present
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__lowerCamelCase = [[w, v]]
# add the other way
if self.graph.get(UpperCamelCase_ ):
# add the edge only if it is not already present
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
__lowerCamelCase = [[w, u]]
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict ):
if self.graph.get(UpperCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase_ )
# the other way round
if self.graph.get(UpperCamelCase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any=-2 , UpperCamelCase_: str=-1 ):
if s == d:
return []
__lowerCamelCase = []
__lowerCamelCase = []
if s == -2:
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return visited
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str=-1 ):
if c == -1:
__lowerCamelCase = floor(random() * 1_00_00 ) + 10
for i in range(UpperCamelCase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
__lowerCamelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase_ , UpperCamelCase_ , 1 )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Any=-2 ):
__lowerCamelCase = deque()
__lowerCamelCase = []
if s == -2:
__lowerCamelCase = list(self.graph )[0]
d.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
while d:
__lowerCamelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Optional[Any] ):
return len(self.graph[u] )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = -2
__lowerCamelCase = []
__lowerCamelCase = s
__lowerCamelCase = False
__lowerCamelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase = len(UpperCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase = True
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = False
indirect_parents.append(UpperCamelCase_ )
__lowerCamelCase = s
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return list(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase_ )
visited.append(UpperCamelCase_ )
__lowerCamelCase = -2
__lowerCamelCase = []
__lowerCamelCase = s
__lowerCamelCase = False
__lowerCamelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase = len(UpperCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase = True
if len(UpperCamelCase_ ) != 0:
__lowerCamelCase = stack[len(UpperCamelCase_ ) - 1]
else:
__lowerCamelCase = False
indirect_parents.append(UpperCamelCase_ )
__lowerCamelCase = s
__lowerCamelCase = ss
# check if we have reached the starting point
if len(UpperCamelCase_ ) == 0:
return False
def lowerCAmelCase__ ( self: Dict ):
return list(self.graph )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any]=-2 , UpperCamelCase_: Dict=-1 ):
__lowerCamelCase = time()
self.dfs(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = time()
return end - begin
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict=-2 ):
__lowerCamelCase = time()
self.bfs(UpperCamelCase_ )
__lowerCamelCase = time()
return end - begin
| 361 |
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29 | 0 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = torch.nn.Linear(2 , 4 )
__lowerCamelCase = torch.optim.AdamW(model.parameters() , lr=1.0 )
__lowerCamelCase = torch.optim.lr_scheduler.OneCycleLR(A__ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__lowerCamelCase = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__lowerCamelCase = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(A__ )
class lowerCamelCase__( __lowerCamelCase):
@require_cuda
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = Accelerator(cpu=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = Accelerator()
__lowerCamelCase = GradientState()
assert state.num_steps == 1
__lowerCamelCase = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__lowerCamelCase = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = create_components()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = create_components()
accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def lowerCAmelCase__ ( self: Optional[Any] ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*UpperCamelCase_: List[Any] , **UpperCamelCase_: int ):
pass
with patch("""torch.cuda.set_device""" , UpperCamelCase_ ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
__lowerCamelCase = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = create_components()
accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = get_signature(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase_ )
# make sure random weights don't match
load_random_weights(UpperCamelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(UpperCamelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) < 1E-3 )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = create_components()
accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = get_signature(UpperCamelCase_ )
# saving hook
def save_config(UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] ):
__lowerCamelCase = {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(UpperCamelCase_ , """data.json""" ) , """w""" ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# loading hook
def load_config(UpperCamelCase_: Dict , UpperCamelCase_: List[Any] ):
with open(os.path.join(UpperCamelCase_ , """data.json""" ) , """r""" ) as f:
__lowerCamelCase = json.load(UpperCamelCase_ )
__lowerCamelCase = config["""class_name"""]
__lowerCamelCase = accelerator.register_save_state_pre_hook(UpperCamelCase_ )
__lowerCamelCase = accelerator.register_load_state_pre_hook(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase_ )
# make sure random weights don't match with hooks
load_random_weights(UpperCamelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__lowerCamelCase = """random"""
# make sure loaded weights match with hooks
accelerator.load_state(UpperCamelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) < 1E-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase_ )
# make sure random weights don't match with hooks removed
load_random_weights(UpperCamelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__lowerCamelCase = """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(UpperCamelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) < 1E-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = create_components()
__lowerCamelCase = None
# This should work
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(dummy_obj is None )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = create_components()
__lowerCamelCase = [1, 2, 3]
# This should work
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(
getattr(UpperCamelCase_ , """_is_accelerate_prepared""" , UpperCamelCase_ ) , UpperCamelCase_ , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(UpperCamelCase_ , """_is_accelerate_prepared""" , UpperCamelCase_ ) , UpperCamelCase_ , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase_ , """_is_accelerate_prepared""" , UpperCamelCase_ ) , UpperCamelCase_ , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase_ , """_is_accelerate_prepared""" , UpperCamelCase_ ) , UpperCamelCase_ , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase_ , """_is_accelerate_prepared""" , UpperCamelCase_ ) , UpperCamelCase_ , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase_ , """_is_accelerate_prepared""" , UpperCamelCase_ ) , UpperCamelCase_ , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def lowerCAmelCase__ ( self: Optional[int] ):
from transformers import AutoModelForCausalLM
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=UpperCamelCase_ , device_map={"""""": 0} , )
__lowerCamelCase = Accelerator()
# This should work
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
@slow
@require_bnb
def lowerCAmelCase__ ( self: Dict ):
from transformers import AutoModelForCausalLM
__lowerCamelCase = Accelerator()
with init_empty_weights():
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
__lowerCamelCase = infer_auto_device_map(UpperCamelCase_ )
__lowerCamelCase = """cpu"""
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=UpperCamelCase_ , load_in_abit=UpperCamelCase_ , llm_inta_enable_fpaa_cpu_offload=UpperCamelCase_ )
# This should not work and get value error
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
@slow
@require_bnb
@require_multi_gpu
def lowerCAmelCase__ ( self: Optional[Any] ):
from transformers import AutoModelForCausalLM
__lowerCamelCase = {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
__lowerCamelCase = infer_auto_device_map(UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=UpperCamelCase_ , device_map=UpperCamelCase_ , )
__lowerCamelCase = Accelerator()
# This should not work and get value error
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowerCAmelCase__ ( self: Optional[Any] ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
__lowerCamelCase = infer_auto_device_map(UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=UpperCamelCase_ , device_map=UpperCamelCase_ , )
__lowerCamelCase = Accelerator()
# This should work
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
@require_cuda
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = torch.nn.Linear(10 , 10 )
__lowerCamelCase = torch.optim.SGD(model.parameters() , lr=0.01 )
__lowerCamelCase = Accelerator(cpu=UpperCamelCase_ )
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
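The tests above revolve around `Accelerator.prepare`, which wraps the model, optimizer, scheduler and dataloaders, registers them on the accelerator (`_models`, `_optimizers`, ...), and tags each object with `_is_accelerate_prepared`. A minimal training-loop sketch of that pattern, using toy components analogous to `create_components()` (the loop itself is illustrative, not taken from the test file):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

# toy components, analogous to create_components() above
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=3, epochs=1)
train_dl = DataLoader(TensorDataset(torch.randn(3, 2), torch.randn(3, 4)))

# prepare() moves everything to the selected device and wraps the dataloader
model, optimizer, scheduler, train_dl = accelerator.prepare(model, optimizer, scheduler, train_dl)

for inputs, targets in train_dl:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)  # use this instead of loss.backward() so mixed precision / DDP still work
    optimizer.step()
    scheduler.step()
```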
| 362 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
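The fixture above only builds dummy components and the call arguments by hand; against a real checkpoint the same argument names apply. A hedged usage sketch (the checkpoint id, the CPU-offload call, and the random tensors standing in for a real image and mask are assumptions; in practice `image` and `mask_image` would normally be PIL images):

```python
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor

# assumed checkpoint id; the test above never downloads weights
pipe = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

generator = torch.Generator(device="cpu").manual_seed(0)
image = floats_tensor((1, 3, 64, 64))                       # placeholder image tensor
mask_image = (floats_tensor((1, 3, 64, 64)) > 0.5).float()  # placeholder binary mask

result = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=image,
    mask_image=mask_image,
    generator=generator,
    num_inference_steps=2,
    output_type="np",
)
print(result.images.shape)
```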
| 29 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowerCamelCase = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
# load decoder from hub
__lowerCamelCase = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase__ ( self: str , **UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(UpperCamelCase_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Union[str, Any] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: List[str] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(UpperCamelCase_ , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=UpperCamelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
__lowerCamelCase = floats_list((3, 10_00) )
__lowerCamelCase = feature_extractor(UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = processor(UpperCamelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
__lowerCamelCase = """This is a test string"""
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Union[str, Any]=(2, 10, 16) , UpperCamelCase_: Optional[Any]=77 ):
np.random.seed(UpperCamelCase_ )
return np.random.rand(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
__lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowerCamelCase = processor.decode(UpperCamelCase_ )
__lowerCamelCase = decoder.decode_beams(UpperCamelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
__lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowerCamelCase = processor.batch_decode(UpperCamelCase_ )
else:
with get_context(UpperCamelCase_ ).Pool() as pool:
__lowerCamelCase = processor.batch_decode(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
with get_context("""fork""" ).Pool() as p:
__lowerCamelCase = decoder.decode_beams_batch(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCamelCase_ , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(UpperCamelCase_ , decoded_processor.logit_score )
self.assertListEqual(UpperCamelCase_ , decoded_processor.lm_score )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
__lowerCamelCase = self._get_dummy_logits()
__lowerCamelCase = 15
__lowerCamelCase = -20.0
__lowerCamelCase = -4.0
__lowerCamelCase = processor.batch_decode(
UpperCamelCase_ , beam_width=UpperCamelCase_ , beam_prune_logp=UpperCamelCase_ , token_min_logp=UpperCamelCase_ , )
__lowerCamelCase = decoded_processor_out.text
__lowerCamelCase = list(UpperCamelCase_ )
with get_context("""fork""" ).Pool() as pool:
__lowerCamelCase = decoder.decode_beams_batch(
UpperCamelCase_ , UpperCamelCase_ , beam_width=UpperCamelCase_ , beam_prune_logp=UpperCamelCase_ , token_min_logp=UpperCamelCase_ , )
__lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
__lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
__lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , UpperCamelCase_ )
self.assertTrue(np.array_equal(UpperCamelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , UpperCamelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(UpperCamelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , UpperCamelCase_ , atol=1E-3 ) )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
__lowerCamelCase = self._get_dummy_logits()
__lowerCamelCase = 2.0
__lowerCamelCase = 5.0
__lowerCamelCase = -20.0
__lowerCamelCase = True
__lowerCamelCase = processor.batch_decode(
UpperCamelCase_ , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , unk_score_offset=UpperCamelCase_ , lm_score_boundary=UpperCamelCase_ , )
__lowerCamelCase = decoded_processor_out.text
__lowerCamelCase = list(UpperCamelCase_ )
decoder.reset_params(
alpha=UpperCamelCase_ , beta=UpperCamelCase_ , unk_score_offset=UpperCamelCase_ , lm_score_boundary=UpperCamelCase_ , )
with get_context("""fork""" ).Pool() as pool:
__lowerCamelCase = decoder.decode_beams_batch(
UpperCamelCase_ , UpperCamelCase_ , )
__lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , UpperCamelCase_ )
__lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowerCamelCase = os.listdir(UpperCamelCase_ )
__lowerCamelCase = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowerCamelCase = os.listdir(UpperCamelCase_ )
__lowerCamelCase = os.listdir(UpperCamelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowerCamelCase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowerCamelCase = floats_list((3, 10_00) )
__lowerCamelCase = processor_wavaveca(UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = processor_auto(UpperCamelCase_ , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowerCamelCase = self._get_dummy_logits()
__lowerCamelCase = processor_wavaveca.batch_decode(UpperCamelCase_ )
__lowerCamelCase = processor_auto.batch_decode(UpperCamelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_feature_extractor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_decoder()
__lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , decoder=UpperCamelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowerCamelCase = self._get_dummy_logits()[0]
__lowerCamelCase = processor.decode(UpperCamelCase_ , output_word_offsets=UpperCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowerCamelCase = self._get_dummy_logits()
__lowerCamelCase = processor.batch_decode(UpperCamelCase_ , output_word_offsets=UpperCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(UpperCamelCase_ , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase__ ( self: Dict ):
import torch
__lowerCamelCase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=UpperCamelCase_ )
__lowerCamelCase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__lowerCamelCase = iter(UpperCamelCase_ )
__lowerCamelCase = next(UpperCamelCase_ )
__lowerCamelCase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowerCamelCase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowerCamelCase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowerCamelCase = model(UpperCamelCase_ ).logits.cpu().numpy()
__lowerCamelCase = processor.decode(logits[0] , output_word_offsets=UpperCamelCase_ )
__lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowerCamelCase = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowerCamelCase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(UpperCamelCase_ , """word""" ) ) , UpperCamelCase_ )
self.assertEqual(""" """.join(self.get_from_offsets(UpperCamelCase_ , """word""" ) ) , output.text )
# output times
__lowerCamelCase = torch.tensor(self.get_from_offsets(UpperCamelCase_ , """start_time""" ) )
__lowerCamelCase = torch.tensor(self.get_from_offsets(UpperCamelCase_ , """end_time""" ) )
# fmt: off
__lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=0.01 ) )
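The slow test above is the full decoding path; condensed, the pattern is: run the CTC model, pass the raw logits to the LM-boosted processor, and turn the returned word offsets into timestamps via `inputs_to_logits_ratio / sampling_rate`. A short sketch of that flow (the silent audio array is just a placeholder input):

```python
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of silence at 16 kHz
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()

output = processor.decode(logits[0], output_word_offsets=True)
time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
words = [
    {"word": d["word"], "start": d["start_offset"] * time_offset, "end": d["end_offset"] * time_offset}
    for d in output["word_offsets"]
]
print(output.text, words)
```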
| 363 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
__lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
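In day-to-day use this ChunkPipeline is reached through `pipeline("zero-shot-object-detection")`; candidate labels may be passed as a list or as a comma-separated string, exactly as `preprocess` above splits them, and each result carries the `score`/`label`/`box` keys built in `postprocess`. A minimal usage sketch (the OwlViT checkpoint id and the sample image URL are the usual documentation defaults, named here as assumptions):

```python
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")

results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
for r in results:
    # box is a dict with xmin / ymin / xmax / ymax, as returned by _get_bounding_box above
    print(r["label"], round(r["score"], 3), r["box"])
```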
| 29 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = 'xlnet'
UpperCAmelCase__ : Union[str, Any] = ['mems']
UpperCAmelCase__ : Union[str, Any] = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self: Optional[Any] , UpperCamelCase_: List[str]=3_20_00 , UpperCamelCase_: Tuple=10_24 , UpperCamelCase_: int=24 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: str=40_96 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[Any]="bi" , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[Any]=5_12 , UpperCamelCase_: Any=None , UpperCamelCase_: str=True , UpperCamelCase_: List[Any]=False , UpperCamelCase_: int=False , UpperCamelCase_: Tuple=-1 , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: str="last" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: int="tanh" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=5 , UpperCamelCase_: Dict=5 , UpperCamelCase_: int=5 , UpperCamelCase_: List[str]=1 , UpperCamelCase_: Any=2 , **UpperCamelCase_: Optional[int] , ):
__lowerCamelCase : str = vocab_size
__lowerCamelCase : Any = d_model
__lowerCamelCase : str = n_layer
__lowerCamelCase : Any = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
__lowerCamelCase : List[str] = d_model // n_head
__lowerCamelCase : int = ff_activation
__lowerCamelCase : Tuple = d_inner
__lowerCamelCase : List[str] = untie_r
__lowerCamelCase : Any = attn_type
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : List[str] = layer_norm_eps
__lowerCamelCase : int = dropout
__lowerCamelCase : Union[str, Any] = mem_len
__lowerCamelCase : Optional[Any] = reuse_len
__lowerCamelCase : Optional[Any] = bi_data
__lowerCamelCase : Optional[int] = clamp_len
__lowerCamelCase : Optional[int] = same_length
__lowerCamelCase : Any = summary_type
__lowerCamelCase : Optional[int] = summary_use_proj
__lowerCamelCase : List[str] = summary_activation
__lowerCamelCase : List[str] = summary_last_dropout
__lowerCamelCase : Tuple = start_n_top
__lowerCamelCase : Optional[int] = end_n_top
__lowerCamelCase : Tuple = bos_token_id
__lowerCamelCase : Optional[Any] = pad_token_id
__lowerCamelCase : Any = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , UpperCamelCase_ , )
__lowerCamelCase : List[Any] = kwargs["""use_cache"""]
__lowerCamelCase : str = use_mems_eval
__lowerCamelCase : Tuple = use_mems_train
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: str ):
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Union[str, Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
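The `attribute_map` at the top of the class makes the generic config names aliases for XLNet's native ones, so `hidden_size` resolves to `d_model`, `num_attention_heads` to `n_head`, and so on; `d_head` is derived as `d_model // n_head` unless passed explicitly. A small sketch of that aliasing, using the defaults visible in `__init__`:

```python
from transformers import XLNetConfig

config = XLNetConfig()  # defaults shown above: d_model=1024, n_layer=24, n_head=16

# generic names are aliases of the XLNet-specific attributes
assert config.hidden_size == config.d_model == 1024
assert config.num_hidden_layers == config.n_layer == 24
assert config.num_attention_heads == config.n_head == 16

# head dimension derived from the defaults
assert config.d_model // config.n_head == 64
```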
| 364 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 29
| 0
|
from itertools import count
def lowerCamelCase__ ( A__ : int = 50 ):
'''simple docstring'''
__lowerCamelCase = [1] * min_block_length
for n in count(A__ ):
fill_count_functions.append(1 )
for block_length in range(A__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 365
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = 'ctrl'
UpperCAmelCase__ : Dict = ['past_key_values']
UpperCAmelCase__ : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self: List[str] , UpperCamelCase_: Union[str, Any]=24_65_34 , UpperCamelCase_: int=2_56 , UpperCamelCase_: List[Any]=12_80 , UpperCamelCase_: List[str]=81_92 , UpperCamelCase_: int=48 , UpperCamelCase_: int=16 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: List[Any]=1E-6 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[Any]=True , **UpperCamelCase_: Optional[int] , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = n_positions
__lowerCamelCase = n_embd
__lowerCamelCase = n_layer
__lowerCamelCase = n_head
__lowerCamelCase = dff
__lowerCamelCase = resid_pdrop
__lowerCamelCase = embd_pdrop
__lowerCamelCase = layer_norm_epsilon
__lowerCamelCase = initializer_range
__lowerCamelCase = use_cache
super().__init__(**UpperCamelCase_ )
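# Illustrative note on the attribute_map above (assuming the standard PretrainedConfig
# aliasing mechanism): generic names resolve to CTRL-specific ones, e.g.
# CTRLConfig(n_embd=1280).hidden_size == 1280, and cfg.max_position_embeddings
# transparently reads cfg.n_positions.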
| 366
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
import math
import sys
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = ''
try:
with open(A__ , 'rb' ) as binary_file:
__lowerCamelCase = binary_file.read()
for dat in data:
__lowerCamelCase = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = {'0': '0', '1': '1'}
__lowerCamelCase, __lowerCamelCase = '', ''
__lowerCamelCase = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__lowerCamelCase = lexicon[curr_string]
result += last_match_id
__lowerCamelCase = last_match_id + '0'
if math.log2(index).is_integer():
__lowerCamelCase = {}
for curr_key in list(A__ ):
__lowerCamelCase = lexicon.pop(A__ )
__lowerCamelCase = new_lex
__lowerCamelCase = last_match_id + '1'
index += 1
__lowerCamelCase = ''
return result
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = 8
try:
with open(A__ , 'wb' ) as opened_file:
__lowerCamelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__lowerCamelCase = data_bits[counter:]
__lowerCamelCase = data_bits[counter + 1 :]
return data_bits
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = read_file_binary(A__ )
__lowerCamelCase = remove_prefix(A__ )
__lowerCamelCase = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
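# Illustration of the byte-to-bit-string conversion used in read_file_binary above:
# every byte is rendered as its zero-padded 8-bit binary form, e.g.
# f"{65:08b}" == "01000001" (ASCII 'A'), and the per-byte strings are concatenated
# into one long bit string before decompression.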
| 367
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no data
# will be seen until the process is done, and if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29
| 0
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
def lowerCamelCase__ ( A__ : type , A__ : Optional[str] , A__ : Optional[List[str]] = None , ):
'''simple docstring'''
__lowerCamelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
__lowerCamelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
__lowerCamelCase = format_type
def lowerCamelCase__ ( A__ : Exception , A__ : Optional[str] , A__ : Optional[List[str]] = None ):
'''simple docstring'''
__lowerCamelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCamelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
UpperCAmelCase_ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
UpperCAmelCase_ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
UpperCAmelCase_ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def lowerCamelCase__ ( A__ : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowerCamelCase__ ( A__ : Optional[str] , **A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = get_format_type_from_alias(A__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**A__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
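# Usage sketch for the registry above (illustrative): get_format_type_from_alias("np")
# resolves to "numpy", so get_formatter("np") returns a NumpyFormatter instance,
# while get_formatter("torch") on a machine without PyTorch raises the stored
# "PyTorch needs to be installed ..." ValueError instead.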
| 368
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCAmelCase_ = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ = '▁'
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: str=True , UpperCamelCase_: str=True , UpperCamelCase_: List[Any]=False , UpperCamelCase_: int="[CLS]" , UpperCamelCase_: Dict="[SEP]" , UpperCamelCase_: Any="<unk>" , UpperCamelCase_: Any="[SEP]" , UpperCamelCase_: Any="<pad>" , UpperCamelCase_: Union[str, Any]="[CLS]" , UpperCamelCase_: str="[MASK]" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: Union[str, Any] , ):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
__lowerCamelCase = (
AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ , normalized=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
else mask_token
)
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return len(self.sp_model )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Any ):
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self: Dict , UpperCamelCase_: Dict ):
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Union[str, Any] ):
if self.remove_space:
__lowerCamelCase = """ """.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize("""NFKD""" , UpperCamelCase_ )
__lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.preprocess_text(UpperCamelCase_ )
__lowerCamelCase = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
__lowerCamelCase = []
for piece in pieces:
if len(UpperCamelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase_ )
else:
new_pieces.append(UpperCamelCase_ )
return new_pieces
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ):
return self.sp_model.PieceToId(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int ):
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = []
__lowerCamelCase = """"""
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(UpperCamelCase_ )
__lowerCamelCase = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
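# Illustrative note on the sequence helpers above (which correspond to the tokenizer's
# build_inputs_with_special_tokens and create_token_type_ids_from_sequences): a single
# sequence is wrapped as [CLS] tokens [SEP] and a pair as [CLS] A [SEP] B [SEP], with
# segment id 0 for the first sequence (including its special tokens) and 1 for the
# second sequence plus its trailing [SEP].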
| 369
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
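# Illustrative reading of the ONNX config above: the export declares a single
# "pixel_values" input with dynamic batch / num_channels / height / width axes,
# uses a validation tolerance of 1e-4, and exposes 12 as the opset via the last
# property (default_onnx_opset in the original SegformerOnnxConfig).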
| 29
| 0
|
UpperCAmelCase_ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 370
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda x: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
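# Worked example of the determinant check above (illustrative): for the key
# [[2, 5], [1, 6]] the determinant is 2 * 6 - 5 * 1 = 7 and gcd(7, 36) == 1, so the
# key is accepted for the 36-character alphabet, whereas [[2, 4], [1, 2]] has
# determinant 0 and would be rejected by check_determinant.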
| 29
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase__ ( A__ : Optional[Any] ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase__ ( ):
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
__lowerCamelCase = [1, 2, 3]
with pytest.raises(A__ ):
with parallel_backend("""unsupported backend""" ):
map_nested(A__ , A__ , num_proc=2 )
with pytest.raises(A__ ):
with parallel_backend("""unsupported backend""" ):
map_nested(A__ , A__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = [1, 2]
__lowerCamelCase = {"""a""": 1, """b""": 2}
__lowerCamelCase = {"""a""": [1, 2], """b""": [3, 4]}
__lowerCamelCase = {"""a""": {"""1""": 1}, """b""": 2}
__lowerCamelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__lowerCamelCase = [2, 3]
__lowerCamelCase = {"""a""": 2, """b""": 3}
__lowerCamelCase = {"""a""": [2, 3], """b""": [4, 5]}
__lowerCamelCase = {"""a""": {"""1""": 2}, """b""": 3}
__lowerCamelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
| 371
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 350
|
def partition(m: int) -> int:
    '''Count the integer partitions of m (ways to write m as a sum of positive integers).'''
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
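# A quick sanity check for the function above (hedged: this assumes it counts all
# integer partitions of m, including the single-part partition m itself; the values
# below were worked out by hand rather than taken from the original file):
#
#     >>> partition(2)   # 2, 1+1
#     2
#     >>> partition(4)   # 4, 3+1, 2+2, 2+1+1, 1+1+1+1
#     5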
| 29
| 0
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , UpperCamelCase_: NestedDataStructureLike[PathLike] , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Any , ):
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = field
__lowerCamelCase = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
__lowerCamelCase = Json(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , field=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dataset , UpperCamelCase_: Union[PathLike, BinaryIO] , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Union[str, Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
__lowerCamelCase = dataset
__lowerCamelCase = path_or_buf
__lowerCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowerCamelCase = num_proc
__lowerCamelCase = """utf-8"""
__lowerCamelCase = to_json_kwargs
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.to_json_kwargs.pop("""path_or_buf""" , UpperCamelCase_ )
__lowerCamelCase = self.to_json_kwargs.pop("""orient""" , """records""" )
__lowerCamelCase = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
__lowerCamelCase = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
__lowerCamelCase = self.to_json_kwargs.pop("""compression""" , UpperCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=UpperCamelCase_ ) as buffer:
__lowerCamelCase = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
__lowerCamelCase = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = args
__lowerCamelCase = query_table(
table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__lowerCamelCase = batch.to_pandas().to_json(
path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: BinaryIO , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] , **UpperCamelCase_: Dict , ):
__lowerCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
__lowerCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(UpperCamelCase_ )
else:
__lowerCamelCase, __lowerCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(UpperCamelCase_ )
return written
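# Hedged usage sketch for the JSON reader/writer pair above. The class and method
# names are anonymised in this dump, so `JsonDatasetReader`/`JsonDatasetWriter` and
# `read()`/`write()` below are the conventional `datasets` names, used only as stand-ins:
#
#     ds = JsonDatasetReader("data.json", split="train").read()
#     JsonDatasetWriter(ds, "out.jsonl", batch_size=1000, num_proc=2).write()
#
# The reader streams when `streaming=True` and otherwise materialises an Arrow-backed
# dataset; the writer serialises it batch by batch to JSON lines, optionally across
# `num_proc` worker processes.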
| 351
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = CLIPTokenizer
UpperCAmelCase__ : Optional[int] = CLIPTokenizerFast
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : List[str] = False
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
# fmt: off
__lowerCamelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: Dict ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int ):
__lowerCamelCase = """lower newer"""
__lowerCamelCase = """lower newer"""
return input_text, output_text
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase = """lower newer"""
__lowerCamelCase = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@require_ftfy
def lowerCAmelCase__ ( self: Tuple ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
__lowerCamelCase = tokenizer_s.tokenize(UpperCamelCase_ )
__lowerCamelCase = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__lowerCamelCase = """xa\u0303y""" + """ """ + """x\xe3y"""
__lowerCamelCase = tokenizer_s.tokenize(UpperCamelCase_ )
__lowerCamelCase = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of space type
__lowerCamelCase = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__lowerCamelCase = tokenizer_s.tokenize(UpperCamelCase_ )
__lowerCamelCase = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of line break type
__lowerCamelCase = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__lowerCamelCase = tokenizer_s.tokenize(UpperCamelCase_ )
__lowerCamelCase = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase = F'{text_of_1_token} {text_of_1_token}'
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
__lowerCamelCase = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
__lowerCamelCase = F' {text}'
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
__lowerCamelCase = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
def lowerCAmelCase__ ( self: str ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(UpperCamelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().test_tokenization_python_rust_equals()
def lowerCAmelCase__ ( self: Any ):
# CLIP always lower cases letters
pass
| 352
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=13 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Dict=False , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Union[str, Any]=99 , UpperCamelCase_: Dict=32 , UpperCamelCase_: Union[str, Any]=5 , UpperCamelCase_: int=4 , UpperCamelCase_: Dict=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Any=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Any=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self: Optional[int] ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Any ):
__lowerCamelCase = OpenLlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: str , ):
__lowerCamelCase = True
__lowerCamelCase = OpenLlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Dict , ):
__lowerCamelCase = OpenLlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , ):
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = OpenLlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
__lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0]
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0]
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase__ : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = OpenLlamaModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = input_dict["""input_ids"""]
__lowerCamelCase = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = """single_label_classification"""
__lowerCamelCase = input_dict["""input_ids"""]
__lowerCamelCase = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = """multi_label_classification"""
__lowerCamelCase = input_dict["""input_ids"""]
__lowerCamelCase = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def lowerCAmelCase__ ( self: Dict ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
__lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCamelCase = OpenLlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
__lowerCamelCase = original_model(UpperCamelCase_ ).last_hidden_state
__lowerCamelCase = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCamelCase = {"""type""": scaling_type, """factor""": 10.0}
__lowerCamelCase = OpenLlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
__lowerCamelCase = scaled_model(UpperCamelCase_ ).last_hidden_state
__lowerCamelCase = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
| 353
|
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start: str, visited: list, sort: list) -> list:
    '''Depth-first traversal of the global `edges` graph, appending each vertex after its children.'''
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
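# Expected behaviour of the demo above (traced by hand, so treat it as a hedged check):
# starting from 'a', children are appended before their parent, so the script prints
# ['c', 'd', 'e', 'b', 'a'], i.e. the reverse of a valid topological order for this graph.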
| 29
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
UpperCAmelCase_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
UpperCAmelCase_ = 3
UpperCAmelCase_ = 4
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Tuple = 'left'
def __init__( self: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple=False , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=False , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Any="<unk>" , UpperCamelCase_: Optional[int]="<sep>" , UpperCamelCase_: List[str]="<pad>" , UpperCamelCase_: List[Any]="<cls>" , UpperCamelCase_: Union[str, Any]="<mask>" , UpperCamelCase_: List[str]=["<eop>", "<eod>"] , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: str , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__lowerCamelCase = 3
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Dict ):
return len(self.sp_model )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Any ):
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] ):
if self.remove_space:
__lowerCamelCase = """ """.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize("""NFKD""" , UpperCamelCase_ )
__lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str ):
__lowerCamelCase = self.preprocess_text(UpperCamelCase_ )
__lowerCamelCase = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
__lowerCamelCase = []
for piece in pieces:
if len(UpperCamelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase_ )
else:
new_pieces.append(UpperCamelCase_ )
return new_pieces
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] ):
return self.sp_model.PieceToId(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = """""".join(UpperCamelCase_ ).replace(UpperCamelCase_ , """ """ ).strip()
return out_string
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: bool = False , UpperCamelCase_: bool = None , UpperCamelCase_: bool = True , **UpperCamelCase_: Union[str, Any] , ):
__lowerCamelCase = kwargs.pop("""use_source_tokenizer""" , UpperCamelCase_ )
__lowerCamelCase = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowerCamelCase = []
__lowerCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
__lowerCamelCase = []
sub_texts.append(UpperCamelCase_ )
else:
current_sub_text.append(UpperCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__lowerCamelCase = """""".join(UpperCamelCase_ )
__lowerCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowerCamelCase = self.clean_up_tokenization(UpperCamelCase_ )
return clean_text
else:
return text
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1]
return ([0] * len(UpperCamelCase_ )) + [1, 1]
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
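# Hedged usage sketch (the class above is anonymised; XLNetTokenizer is the conventional
# transformers name for a SentencePiece tokenizer with this behaviour):
#
#     from transformers import XLNetTokenizer
#     tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     tok("Hello world")["input_ids"]
#
# Note the left padding side and the special-token layout `tokens <sep> <cls>` built by
# the methods above.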
| 354
|
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''Scrape worldometers.info and return the worldwide COVID-19 counters as a dict.'''
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
        print(f"""{key}\n{value}\n""")
| 29
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'speech_to_text_2'
UpperCAmelCase__ : Any = ['past_key_values']
UpperCAmelCase__ : Dict = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self: int , UpperCamelCase_: int=1_00_00 , UpperCamelCase_: Optional[int]=6 , UpperCamelCase_: Union[str, Any]=20_48 , UpperCamelCase_: Any=4 , UpperCamelCase_: str=0.0 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Union[str, Any]="relu" , UpperCamelCase_: Union[str, Any]=2_56 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Any=0.0 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1 , UpperCamelCase_: Optional[Any]=0 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Tuple=10_24 , **UpperCamelCase_: Union[str, Any] , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = d_model
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = decoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_target_positions
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
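# Hedged example: this decoder-only configuration corresponds to Speech2Text2Config in
# transformers, so it can typically be instantiated as
#
#     from transformers import Speech2Text2Config
#     config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6, decoder_attention_heads=4)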
| 355
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    '''Greedy approximation: repeatedly pick the highest-degree vertex until no edges remain.'''
    queue = []
    # for each node and its adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 356
|
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    '''Return the 1-indexed line of base_exp.txt whose base**exponent value is largest.'''
    # comparing exponent * log10(base) avoids evaluating the huge powers directly
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(""",""")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
    print(solution())
| 29
| 0
|
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    '''Apply the logistic sigmoid 1 / (1 + exp(-x)) element-wise.'''
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
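# A minimal check for the sigmoid above (values shown with numpy's default repr):
#
#     >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
#     array([0.26894142, 0.73105858, 0.88079708])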
| 357
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''Build a discrete beta schedule from a cumulative alpha_bar function (cosine or exp).'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
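# Hedged sketch of how the helper above is used: it converts a cumulative alpha_bar
# schedule into per-step betas, e.g.
#
#     betas = betas_for_alpha_bar(1000, max_beta=0.999, alpha_transform_type="cosine")
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#
# which mirrors what the scheduler below does when beta_schedule == "squaredcos_cap_v2".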
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.float32 )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
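# Illustrative sampling loop for a scheduler exposing the interface above (a minimal sketch;
# `unet` and `latents` are assumed to come from the surrounding pipeline, and the
# `scale_model_input` helper is assumed to exist as in other k-diffusion style schedulers):
#
#     scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(latents, t)
#         noise_pred = unet(model_input, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample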
def lowerCamelCase__ ( a: int , b: int ):
    '''Return the bitwise AND of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
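# Illustrative calls (added example, not part of the original snippet):
# 25 = 0b011001 and 32 = 0b100000 share no set bits; 37 = 0b100101 and 50 = 0b110010 share only bit 5.
assert lowerCamelCase__(25 , 32 ) == "0b000000"
assert lowerCamelCase__(37 , 50 ) == "0b100000"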
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = IFImg2ImgSuperResolutionPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: Optional[int] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = DebertaTokenizer
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Tuple = DebertaTokenizerFast
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCamelCase = {"""unk_token""": """[UNK]"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Tuple , **UpperCamelCase_: Any ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = """lower newer"""
__lowerCamelCase = """lower newer"""
return input_text, output_text
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = """lower newer"""
__lowerCamelCase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tokenizer("""Hello""" , """World""" )
__lowerCamelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(
"""sequence builders""" , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCamelCase = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowerCamelCase = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
__lowerCamelCase = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
__lowerCamelCase = [tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for seq in encoding["""input_ids"""]]
# fmt: off
__lowerCamelCase = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCamelCase = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , UpperCamelCase_ )
for expected, decoded in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs ( graph ):
    '''Check whether the adjacency-list graph is bipartite using a DFS 2-coloring.'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )

    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
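# Added illustrative check (not part of the original snippet): a triangle is an odd cycle,
# so the same routine reports that it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # expected output: False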
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPT2Tokenizer,
GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = 0
@slow
def lowerCAmelCase__ ( self: Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase__ ( self: List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase_ , """vocab.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""bert""" , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase_ , """merges.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""gpt2""" , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCAmelCase__ ( self: str ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase_ , """vocab.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""bert""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase_ , """merges.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""gpt2""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowerCamelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def lowerCAmelCase__ ( self: Union[str, Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowerCamelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCAmelCase__ ( self: Any ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__lowerCamelCase = TOKENIZER_MAPPING.values()
__lowerCamelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCAmelCase__ ( self: Union[str, Any] ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , UpperCamelCase_ )
@require_tokenizers
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = """Hello, world. How are you?"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowerCamelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
# Check we can load the tokenizer config of an online model.
__lowerCamelCase = get_tokenizer_config("""bert-base-cased""" )
__lowerCamelCase = config.pop("""_commit_hash""" , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowerCamelCase = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCAmelCase__ ( self: str ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
__lowerCamelCase = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase__ ( self: Tuple ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCAmelCase__ ( self: Dict ):
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = False
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = NewTokenizer
UpperCAmelCase__ : Any = False
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase__ ( self: List[str] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Any ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: int ):
# Make sure we have cached the tokenizer.
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex: str ):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f'->{target_vertex}'
if __name__ == "__main__":
g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
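# Expected behaviour of the three calls above, traced from the BFS parent map built with
# source 'G' (added note, derived from the code): shortest_path('D') follows the parent chain
# D -> B -> A -> C -> G and prints 'G->C->A->B->D'; shortest_path('G') returns the source
# itself; shortest_path('Foo') raises ValueError because 'Foo' is unreachable, so the final
# print terminates the script with that error.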
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = BertJapaneseTokenizer
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = True
def lowerCAmelCase__ ( self: str ):
super().setUp()
__lowerCamelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict ):
__lowerCamelCase = """こんにちは、世界。 \nこんばんは、世界。"""
__lowerCamelCase = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.get_input_output_texts(UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def lowerCAmelCase__ ( self: List[Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: List[Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file )
__lowerCamelCase = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(UpperCamelCase_ )
__lowerCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCamelCase_ , """wb""" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , """rb""" ) as handle:
__lowerCamelCase = pickle.load(UpperCamelCase_ )
__lowerCamelCase = tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: str ):
try:
__lowerCamelCase = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: List[Any] ):
try:
__lowerCamelCase = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = MecabTokenizer(do_lower_case=UpperCamelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: int ):
try:
__lowerCamelCase = MecabTokenizer(
do_lower_case=UpperCamelCase_ , normalize_text=UpperCamelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MecabTokenizer(normalize_text=UpperCamelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(UpperCamelCase_ )
__lowerCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCamelCase_ , """wb""" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , """rb""" ) as handle:
__lowerCamelCase = pickle.load(UpperCamelCase_ )
__lowerCamelCase = tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_sudachi
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = SudachiTokenizer(do_lower_case=UpperCamelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = SudachiTokenizer(normalize_text=UpperCamelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = SudachiTokenizer(trim_whitespace=UpperCamelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(UpperCamelCase_ )
__lowerCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCamelCase_ , """wb""" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , """rb""" ) as handle:
__lowerCamelCase = pickle.load(UpperCamelCase_ )
__lowerCamelCase = tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_jumanpp
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = JumanppTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = JumanppTokenizer(normalize_text=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = JumanppTokenizer(trim_whitespace=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
__lowerCamelCase = {}
for i, token in enumerate(UpperCamelCase_ ):
__lowerCamelCase = i
__lowerCamelCase = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
__lowerCamelCase = tokenizer.subword_tokenizer
__lowerCamelCase = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(UpperCamelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
__lowerCamelCase = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(UpperCamelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
__lowerCamelCase = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = BertJapaneseTokenizer
UpperCAmelCase__ : List[Any] = False
def lowerCAmelCase__ ( self: str ):
super().setUp()
__lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[str] ):
__lowerCamelCase = """こんにちは、世界。 \nこんばんは、世界。"""
__lowerCamelCase = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def lowerCAmelCase__ ( self: Tuple ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Optional[Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: List[str] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
__lowerCamelCase = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
UpperCamelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowerCamelCase = {}
for i, token in enumerate(UpperCamelCase_ ):
__lowerCamelCase = i
__lowerCamelCase = CharacterTokenizer(vocab=UpperCamelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
__lowerCamelCase = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = """cl-tohoku/bert-base-japanese"""
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(UpperCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
__lowerCamelCase = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
from math import ceil, sqrt
def solution ( limit: int = 1000000 ):
    '''Count the hollow square laminae that can be built with at most `limit` tiles.'''
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
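# Counting argument behind the loop above (added explanatory sketch): a hollow square lamina
# with outer side n and square hole of side m (same parity as n, 1 <= m <= n - 2) uses
# n**2 - m**2 tiles.  The tile budget forces m >= sqrt(n**2 - limit), so after rounding that
# bound up to the correct parity there are (n - m_min - 2) // 2 + 1 admissible hole sizes for
# each n, which is exactly the quantity the loop accumulates.  The smallest case is n = 3,
# m = 1, using 3**2 - 1**2 = 8 tiles.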
if __name__ == "__main__":
print(f"""{solution() = }""")
# Imports
import numpy as np
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: str=None , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[int]=None ):
self.set_matricies(red=UpperCamelCase_ , green=UpperCamelCase_ , blue=UpperCamelCase_ , red_edge=UpperCamelCase_ , nir=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Any=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[str]=None ):
if red is not None:
__lowerCamelCase = red
if green is not None:
__lowerCamelCase = green
if blue is not None:
__lowerCamelCase = blue
if red_edge is not None:
__lowerCamelCase = red_edge
if nir is not None:
__lowerCamelCase = nir
return True
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[int]="" , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: int=None , UpperCamelCase_: int=None ):
self.set_matricies(red=UpperCamelCase_ , green=UpperCamelCase_ , blue=UpperCamelCase_ , red_edge=UpperCamelCase_ , nir=UpperCamelCase_ )
__lowerCamelCase = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def lowerCAmelCase__ ( self: Optional[Any] ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowerCAmelCase__ ( self: Any ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowerCAmelCase__ ( self: Optional[Any] ):
return self.nir * (self.red / (self.green**2))
def lowerCAmelCase__ ( self: Union[str, Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowerCAmelCase__ ( self: Optional[Any] ):
return (self.nir - self.red) / (self.nir + self.red)
def lowerCAmelCase__ ( self: Optional[Any] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def lowerCAmelCase__ ( self: Dict ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowerCAmelCase__ ( self: int ):
return (self.nir - self.green) / (self.nir + self.green)
def lowerCAmelCase__ ( self: Optional[Any] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowerCAmelCase__ ( self: Any ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowerCAmelCase__ ( self: Optional[int] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowerCAmelCase__ ( self: List[Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str]=0.08 , UpperCamelCase_: List[Any]=1.22 , UpperCamelCase_: List[str]=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowerCAmelCase__ ( self: Union[str, Any] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowerCAmelCase__ ( self: int ):
return (self.nir / self.green) - 1
def lowerCAmelCase__ ( self: Any ):
return (self.nir / self.redEdge) - 1
def lowerCAmelCase__ ( self: int ):
return (self.red - self.blue) / self.red
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowerCAmelCase__ ( self: Tuple ):
return self.nir - self.green
def lowerCAmelCase__ ( self: Any ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[Any]=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowerCAmelCase__ ( self: Tuple ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Any=None , UpperCamelCase_: str=None ):
return (self.nir - b) / (a * self.red)
def lowerCAmelCase__ ( self: List[Any] ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowerCAmelCase__ ( self: List[str] ):
return (self.red + self.green + self.blue) / 30.5
def lowerCAmelCase__ ( self: Tuple ):
return self.nir / self.red
def lowerCAmelCase__ ( self: int ):
return (self.rvi() - 1) / (self.rvi() + 1)
def lowerCAmelCase__ ( self: Dict ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowerCAmelCase__ ( self: Dict ):
return self.green / (self.nir + self.red + self.green)
def lowerCAmelCase__ ( self: int ):
return self.nir / (self.nir + self.red + self.green)
def lowerCAmelCase__ ( self: Any ):
return self.red / (self.nir + self.red + self.green)
def lowerCAmelCase__ ( self: Optional[int] ):
return (self.green - self.red) / (self.green + self.red)
def lowerCAmelCase__ ( self: str ):
return (self.red - self.green) / (self.red + self.green)
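    # S: saturation of the visible bands, (max - min) / max over red, green and blue.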
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowerCamelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowerCAmelCase__ ( self: Union[str, Any] ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowerCAmelCase__ ( self: Dict ):
return self.nir / self.red
def lowerCAmelCase__ ( self: Optional[Any] ):
return (self.ndvi() + 0.5) ** (1 / 2)
def lowerCAmelCase__ ( self: int ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 362
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray ):
'''simple docstring'''
__lowerCamelCase = XGBClassifier()
classifier.fit(A__ , A__ )
return classifier
def lowerCamelCase__ ( ):
'''simple docstring'''
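    # Load the iris data, hold out 25% for testing, fit an XGBoost classifier and plot
    # its confusion matrix normalized over the true labels.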
__lowerCamelCase = load_iris()
__lowerCamelCase, __lowerCamelCase = data_handling(A__ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = train_test_split(
A__ , A__ , test_size=0.25 )
__lowerCamelCase = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
__lowerCamelCase = xgboost(A__ , A__ )
    # Display the classifier's confusion matrix, normalized over the true labels
ConfusionMatrixDisplay.from_estimator(
A__ , A__ , A__ , display_labels=A__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 363
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
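    # Preprocessing emits one chunk per candidate label: the tokenized text query plus
    # the image features, with `is_last` marking the final label for this image.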
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
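    # Postprocessing runs object-detection post-processing once per label, keeps the
    # detections above `threshold`, sorts them by score and optionally truncates to `top_k`.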
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
__lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 29
| 0
|
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
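    # Sum of Euler's totient phi(n) for 2 <= n <= limit (the number of reduced proper
    # fractions with denominator at most limit): sieve the primes, then apply the product
    # formula phi(n) = n * prod(1 - 1/p) over the primes p dividing n.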
__lowerCamelCase : Optional[int] = set(range(3 , A__ , 2 ) )
primes.add(2 )
for p in range(3 , A__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A__ , A__ ) ) )
__lowerCamelCase : Union[str, Any] = [float(A__ ) for n in range(limit + 1 )]
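    # For each prime p, scale phi[n] by (1 - 1/p) for every multiple n of p.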
for p in primes:
for n in range(A__ , limit + 1 , A__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 364
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 29
| 0
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__( unittest.TestCase):
def __init__( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: str=3 , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Dict=10 , UpperCamelCase_: Union[str, Any]=[10, 20, 30, 40] , UpperCamelCase_: Dict=[1, 1, 2, 1] , UpperCamelCase_: int=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Union[str, Any]="relu" , UpperCamelCase_: List[str]=3 , UpperCamelCase_: List[str]=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = embeddings_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = depths
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_act
__lowerCamelCase = num_labels
__lowerCamelCase = scope
__lowerCamelCase = len(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = self.get_config()
return config, pixel_values
def lowerCAmelCase__ ( self: List[Any] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Dict ):
__lowerCamelCase = FlaxRegNetModel(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = FlaxRegNetForImageClassification(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = FlaxRegNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self: Union[str, Any] ):
return
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCAmelCase__ ( self: str ):
pass
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
def check_hidden_states_output(UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict ):
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
__lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_class(UpperCamelCase_ )
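                # Run the forward pass with and without JIT and check that the jitted and
                # eager outputs agree in number and shape.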
@jax.jit
def model_jitted(UpperCamelCase_: Tuple , **UpperCamelCase_: str ):
return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest("""JIT Enabled""" ):
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class lowerCamelCase__( unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = model(**UpperCamelCase_ )
# verify the logits
__lowerCamelCase = (1, 10_00)
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__lowerCamelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 365
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int]=13 , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: List[str]=[10, 20, 30, 40] , UpperCamelCase_: List[str]=[2, 2, 3, 2] , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: str="gelu" , UpperCamelCase_: List[str]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: Optional[Any]=["stage2", "stage3", "stage4"] , UpperCamelCase_: Any=3 , UpperCamelCase_: Optional[int]=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = num_stages
__lowerCamelCase = hidden_sizes
__lowerCamelCase = depths
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = out_features
__lowerCamelCase = num_labels
__lowerCamelCase = scope
__lowerCamelCase = num_stages
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: Union[str, Any] ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowerCAmelCase__ ( self: Any ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase_ , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] ):
__lowerCamelCase = UperNetForSemanticSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
UpperCAmelCase__ : int = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Any = False
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = UperNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self: str ):
return
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def lowerCAmelCase__ ( self: Dict ):
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def lowerCAmelCase__ ( self: Dict ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowerCAmelCase__ ( self: int ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowerCAmelCase__ ( self: str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: Any ):
pass
def lowerCAmelCase__ ( self: Union[str, Any] ):
def check_hidden_states_output(UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
__lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(UpperCamelCase_ )
__lowerCamelCase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=UpperCamelCase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def lowerCAmelCase__ ( self: List[str] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
__lowerCamelCase = Image.open(A__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
__lowerCamelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(UpperCamelCase_ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
__lowerCamelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(UpperCamelCase_ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 366
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
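    # Bidirectional bubble sort: each outer iteration makes a backward pass that pushes the
    # smallest remaining element to the front, then a forward pass that pushes the largest
    # to the back, stopping early once a full iteration completes without a swap.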
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version('>=' , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
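    # Gather the tensor from every process and check that each gathered copy matches the
    # local tensor, so callers can assert cross-process consistency.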
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
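# Relay lines from an asyncio subprocess stream to `callback` until the stream is exhausted.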
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
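    # tee() mirrors each decoded line into its sink list and, unless `quiet`, echoes it
    # immediately so output stays visible even if the subprocess later stalls.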
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode('utf-8' ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label='stderr:' ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = ' '.join(A__ )
if result.returncode > 0:
__lowerCamelCase = '\n'.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , 'decode' ):
__lowerCamelCase = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 367
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def require_torch_min_version( test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version(""">=""" , version ) , f'test requires torch version >= {version}' )(test_case )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def run_command( command: List[str] , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , """decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
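# Added usage example (not in the original file): run_command(["python", "-c", "print(1)"], return_stdout=True)
# should return the string "1\n", while a failing command raises SubprocessCallException with the captured output.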
| 29
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs( graph ):
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
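# Added sanity check (not part of the original script): a triangle is an odd cycle, so it cannot be
# two-colored and the check should report False.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # expected output: False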
| 368
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: int , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ):
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: int ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Dict , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
__lowerCamelCase = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
__lowerCamelCase = self.image_encoder(UpperCamelCase_ )["""last_hidden_state"""]
__lowerCamelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
__lowerCamelCase = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Dict , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
__lowerCamelCase = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = self._execution_device
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.prior.config.num_embeddings
__lowerCamelCase = self.prior.config.embedding_dim
__lowerCamelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__lowerCamelCase = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
__lowerCamelCase, __lowerCamelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
__lowerCamelCase = []
for i, latent in enumerate(UpperCamelCase_ ):
print()
__lowerCamelCase = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(UpperCamelCase_ )
__lowerCamelCase = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
__lowerCamelCase = images.cpu().numpy()
if output_type == "pil":
__lowerCamelCase = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 369
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[NestedDataStructureLike[PathLike]] = None , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Tuple , ):
__lowerCamelCase = path_or_paths
__lowerCamelCase = split if split or isinstance(UpperCamelCase_ , UpperCamelCase_ ) else """train"""
__lowerCamelCase = features
__lowerCamelCase = cache_dir
__lowerCamelCase = keep_in_memory
__lowerCamelCase = streaming
__lowerCamelCase = num_proc
__lowerCamelCase = kwargs
@abstractmethod
def lowerCAmelCase__ ( self: List[Any] ):
pass
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Any , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = features
__lowerCamelCase = cache_dir
__lowerCamelCase = keep_in_memory
__lowerCamelCase = streaming
__lowerCamelCase = num_proc
__lowerCamelCase = kwargs
@abstractmethod
def lowerCAmelCase__ ( self: Tuple ):
pass
| 370
|
import string
import numpy
def greatest_common_divisor( a: int , b: int ):
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a , a )
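# Quick example (added for illustration): greatest_common_divisor(12, 18) evaluates to 6.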
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda __lowerCamelCase: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
                F'determinant modulo {req_l} of encryption key({det}) '
                F'is not coprime with {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 0
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = 'autoformer'
UpperCAmelCase__ : int = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self: Optional[int] , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "student_t" , UpperCamelCase_: str = "nll" , UpperCamelCase_: int = 1 , UpperCamelCase_: List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase_: bool = True , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: int = 64 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: str = "gelu" , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 0.02 , UpperCamelCase_: bool = True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: int = 10 , UpperCamelCase_: int = 25 , UpperCamelCase_: int = 3 , **UpperCamelCase_: str , ):
# time series specific configuration
__lowerCamelCase = prediction_length
__lowerCamelCase = context_length if context_length is not None else prediction_length
__lowerCamelCase = distribution_output
__lowerCamelCase = loss
__lowerCamelCase = input_size
__lowerCamelCase = num_time_features
__lowerCamelCase = lags_sequence
__lowerCamelCase = scaling
__lowerCamelCase = num_dynamic_real_features
__lowerCamelCase = num_static_real_features
__lowerCamelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__lowerCamelCase = cardinality
else:
__lowerCamelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__lowerCamelCase = embedding_dimension
else:
__lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase = num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features
__lowerCamelCase = d_model
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = use_cache
# Autoformer
__lowerCamelCase = label_length
__lowerCamelCase = moving_average
__lowerCamelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Tuple ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 371
|
import qiskit
def half_adder( bita: int , bitb: int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 ) # extract XOR value
    qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
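# Added note: for inputs (1, 1) the XOR (sum) bit is 0 and the AND (carry) bit is 1, so with the
# classical-bit ordering qiskit uses for count keys the histogram should place all ~1000 shots on '10'.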
| 29
| 0
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Optional[int]="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
__lowerCamelCase = {}
__lowerCamelCase = []
__lowerCamelCase = []
for key, info in class_info.items():
__lowerCamelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(A__ ) )
__lowerCamelCase = thing_ids
__lowerCamelCase = class_names
return metadata
class lowerCamelCase__( unittest.TestCase):
def __init__( self: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=7 , UpperCamelCase_: str=3 , UpperCamelCase_: Optional[Any]=30 , UpperCamelCase_: Tuple=4_00 , UpperCamelCase_: int=None , UpperCamelCase_: Tuple=True , UpperCamelCase_: Any=True , UpperCamelCase_: int=[0.5, 0.5, 0.5] , UpperCamelCase_: Any=[0.5, 0.5, 0.5] , UpperCamelCase_: int=10 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: int=2_55 , UpperCamelCase_: List[Any]="shi-labs/oneformer_demo" , UpperCamelCase_: str="ade20k_panoptic.json" , UpperCamelCase_: Any=10 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = do_resize
__lowerCamelCase = {"""shortest_edge""": 32, """longest_edge""": 13_33} if size is None else size
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean
__lowerCamelCase = image_std
__lowerCamelCase = class_info_file
__lowerCamelCase = prepare_metadata(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = num_text
__lowerCamelCase = repo_path
# for the post_process_functions
__lowerCamelCase = 2
__lowerCamelCase = 10
__lowerCamelCase = 10
__lowerCamelCase = 3
__lowerCamelCase = 4
__lowerCamelCase = num_labels
__lowerCamelCase = do_reduce_labels
__lowerCamelCase = ignore_index
def lowerCAmelCase__ ( self: Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[str]=False ):
if not batched:
__lowerCamelCase = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
__lowerCamelCase, __lowerCamelCase = image.size
else:
__lowerCamelCase, __lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCamelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCamelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCamelCase = self.size["""shortest_edge"""]
__lowerCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCamelCase = self.size["""shortest_edge"""]
__lowerCamelCase = self.size["""shortest_edge"""]
else:
__lowerCamelCase = []
for image in image_inputs:
__lowerCamelCase, __lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCamelCase = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
__lowerCamelCase = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
def lowerCAmelCase__ ( self: int ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCAmelCase__ : Any = image_processing_class
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = OneFormerImageProcessorTester(self )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """ignore_index""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """class_info_file""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """num_text""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """repo_path""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """metadata""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_reduce_labels""" ) )
def lowerCAmelCase__ ( self: int ):
pass
def lowerCAmelCase__ ( self: Any ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCamelCase, __lowerCamelCase = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase, __lowerCamelCase = self.image_processing_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__lowerCamelCase = image_processor(
UpperCamelCase_ , ["""semantic"""] * len(UpperCamelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self: Tuple ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCamelCase, __lowerCamelCase = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase, __lowerCamelCase = self.image_processing_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__lowerCamelCase = image_processor(
UpperCamelCase_ , ["""semantic"""] * len(UpperCamelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCamelCase, __lowerCamelCase = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase, __lowerCamelCase = self.image_processing_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__lowerCamelCase = image_processor(
UpperCamelCase_ , ["""semantic"""] * len(UpperCamelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str=False , UpperCamelCase_: str=False , UpperCamelCase_: Dict="np" ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCamelCase = self.image_processing_tester.num_labels
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ )
if with_segmentation_maps:
__lowerCamelCase = num_labels
if is_instance_map:
__lowerCamelCase = list(range(UpperCamelCase_ ) ) * 2
__lowerCamelCase = dict(enumerate(UpperCamelCase_ ) )
__lowerCamelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCamelCase = [Image.fromarray(UpperCamelCase_ ) for annotation in annotations]
__lowerCamelCase = image_processor(
UpperCamelCase_ , ["""semantic"""] * len(UpperCamelCase_ ) , UpperCamelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=UpperCamelCase_ , pad_and_return_pixel_mask=UpperCamelCase_ , )
return inputs
def lowerCAmelCase__ ( self: List[Any] ):
pass
def lowerCAmelCase__ ( self: Tuple ):
def common(UpperCamelCase_: Any=False , UpperCamelCase_: Tuple=None ):
__lowerCamelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase_ , is_instance_map=UpperCamelCase_ , segmentation_type=UpperCamelCase_ )
__lowerCamelCase = inputs["""mask_labels"""]
__lowerCamelCase = inputs["""class_labels"""]
__lowerCamelCase = inputs["""pixel_values"""]
__lowerCamelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase_ )
common(is_instance_map=UpperCamelCase_ , segmentation_type="""pil""" )
common(is_instance_map=UpperCamelCase_ , segmentation_type="""pil""" )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = np.zeros((20, 50) )
__lowerCamelCase = 1
__lowerCamelCase = 1
__lowerCamelCase = 1
__lowerCamelCase = binary_mask_to_rle(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCamelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCamelCase = fature_extractor.post_process_semantic_segmentation(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCamelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__lowerCamelCase = fature_extractor.post_process_semantic_segmentation(UpperCamelCase_ , target_sizes=UpperCamelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCamelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCamelCase = image_processor.post_process_instance_segmentation(UpperCamelCase_ , threshold=0 )
self.assertTrue(len(UpperCamelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCamelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCamelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCamelCase = image_processor.post_process_panoptic_segmentation(UpperCamelCase_ , threshold=0 )
self.assertTrue(len(UpperCamelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCamelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 350
|
def partition( m: int ):
    '''simple docstring'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
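# Added sanity values: the function reproduces the integer-partition counts p(m),
# e.g. partition(3) == 3, partition(4) == 5 and partition(5) == 7.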
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 0
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass(frozen=__lowerCamelCase)
class lowerCamelCase__:
UpperCAmelCase__ : str
UpperCAmelCase__ : str
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
@dataclass(frozen=__lowerCamelCase)
class lowerCamelCase__:
UpperCAmelCase__ : List[int]
UpperCAmelCase__ : Optional[List[int]] = None
UpperCAmelCase__ : Optional[List[int]] = None
UpperCAmelCase__ : Optional[Union[int, float]] = None
UpperCAmelCase__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[InputFeatures]
def __init__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: str , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: bool = False , ):
__lowerCamelCase = hans_processors[task]()
__lowerCamelCase = os.path.join(
UpperCamelCase_ , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(UpperCamelCase_ ) , UpperCamelCase_ , ) , )
__lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowerCamelCase, __lowerCamelCase = label_list[2], label_list[1]
__lowerCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + """.lock"""
with FileLock(UpperCamelCase_ ):
if os.path.exists(UpperCamelCase_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
__lowerCamelCase = torch.load(UpperCamelCase_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
__lowerCamelCase = (
processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
)
logger.info("""Training examples: %s""" , len(UpperCamelCase_ ) )
__lowerCamelCase = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
logger.info("""Saving features into cached file %s""" , UpperCamelCase_ )
torch.save(self.features , UpperCamelCase_ )
def __len__( self: Optional[int] ):
return len(self.features )
def __getitem__( self: Any , UpperCamelCase_: Optional[int] ):
return self.features[i]
def lowerCAmelCase__ ( self: Dict ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCamelCase__:
UpperCAmelCase__ : List[InputFeatures]
def __init__( self: Dict , UpperCamelCase_: str , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: str , UpperCamelCase_: Optional[int] = 1_28 , UpperCamelCase_: Dict=False , UpperCamelCase_: bool = False , ):
__lowerCamelCase = hans_processors[task]()
__lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowerCamelCase, __lowerCamelCase = label_list[2], label_list[1]
__lowerCamelCase = label_list
__lowerCamelCase = processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
__lowerCamelCase = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(UpperCamelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__lowerCamelCase = tf.data.Dataset.from_generator(
UpperCamelCase_ , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowerCAmelCase__ ( self: Optional[int] ):
return self.dataset
def __len__( self: Tuple ):
return len(self.features )
def __getitem__( self: Dict , UpperCamelCase_: List[Any] ):
return self.features[i]
def lowerCAmelCase__ ( self: Dict ):
return self.label_list
class lowerCamelCase__( __lowerCamelCase):
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , """heuristics_train_set.txt""" ) ) , """train""" )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int ):
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def lowerCAmelCase__ ( self: List[Any] ):
return ["contradiction", "entailment", "neutral"]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] ):
__lowerCamelCase = []
for i, line in enumerate(UpperCamelCase_ ):
if i == 0:
continue
__lowerCamelCase = """%s-%s""" % (set_type, line[0])
__lowerCamelCase = line[5]
__lowerCamelCase = line[6]
__lowerCamelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
__lowerCamelCase = line[0]
examples.append(InputExample(guid=UpperCamelCase_ , text_a=UpperCamelCase_ , text_b=UpperCamelCase_ , label=UpperCamelCase_ , pairID=UpperCamelCase_ ) )
return examples
def lowerCamelCase__ ( A__ : List[InputExample] , A__ : List[str] , A__ : int , A__ : PreTrainedTokenizer , ):
'''simple docstring'''
__lowerCamelCase = {label: i for i, label in enumerate(A__ )}
__lowerCamelCase = []
for ex_index, example in tqdm.tqdm(enumerate(A__ ) , desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
__lowerCamelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=A__ , max_length=A__ , padding="""max_length""" , truncation=A__ , return_overflowing_tokens=A__ , )
__lowerCamelCase = label_map[example.label] if example.label in label_map else 0
__lowerCamelCase = int(example.pairID )
features.append(InputFeatures(**A__ , label=A__ , pairID=A__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
UpperCAmelCase_ = {
'hans': 3,
}
UpperCAmelCase_ = {
'hans': HansProcessor,
}
| 351
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( electron_conc: float , hole_conc: float , intrinsic_conc: float , ):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
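# Worked example (added): with electron_conc=25, hole_conc=100 and intrinsic_conc=0 the function returns
# ("intrinsic_conc", 50.0), since the intrinsic concentration is the square root of 25 * 100.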
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: dict[str, list[str]] , UpperCamelCase_: str ):
__lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase = {}
__lowerCamelCase = source_vertex
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = {self.source_vertex}
__lowerCamelCase = None
__lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
__lowerCamelCase = vertex
queue.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
__lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
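    # Added note (assumption: in the upstream, non-obfuscated version of this class the BFS
    # loop fills self.parent). With the graph above and source 'G', the expected results are
    # shortest_path('D') == 'G->C->A->B->D', shortest_path('G') == 'G', and the final call
    # raises ValueError because 'Foo' is unreachable / not in the graph.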
| 353
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
    """
    Recursively visit `start` and its unvisited neighbours in the global `edges` graph,
    appending each vertex to `sort` only after all of its neighbours have been handled
    (a post-order, i.e. the reverse of a conventional topological order).
    """
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if all vertices haven't been visited select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
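    # Added note (assumption: with the upstream, non-obfuscated variable bindings). For the
    # graph defined above and start vertex 'a', this call prints ['c', 'd', 'e', 'b', 'a'],
    # a post-order in which every vertex appears after its successors; reversing the list
    # gives the conventional topological order a, b, e, d, c.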
| 29
| 0
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
UpperCAmelCase_ = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase__ ( A__ : str = "dhaka" , A__ : int = 5 ):
    """
    Search Google Images for the given query and save up to the requested number of
    full-resolution results (capped at 50) into a query_<term> directory, returning
    how many images were downloaded.
    """
__lowerCamelCase = min(A__ , 50 ) # Prevent abuse!
__lowerCamelCase = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
__lowerCamelCase = requests.get("""https://www.google.com/search""" , params=A__ , headers=A__ )
__lowerCamelCase = BeautifulSoup(html.text , """html.parser""" )
__lowerCamelCase = """""".join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
__lowerCamelCase = json.dumps(A__ )
__lowerCamelCase = json.loads(A__ )
__lowerCamelCase = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , A__ , )
if not matched_google_image_data:
return 0
__lowerCamelCase = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(A__ ) , )
__lowerCamelCase = re.findall(
R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , A__ , )
for index, fixed_full_res_image in enumerate(A__ ):
if index >= max_images:
return index
__lowerCamelCase = bytes(A__ , """ascii""" ).decode(
"""unicode-escape""" )
__lowerCamelCase = bytes(A__ , """ascii""" ).decode(
"""unicode-escape""" )
__lowerCamelCase = urllib.request.build_opener()
__lowerCamelCase = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(A__ )
__lowerCamelCase = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(A__ ):
os.makedirs(A__ )
urllib.request.urlretrieve( # noqa: S310
A__ , f'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
UpperCAmelCase_ = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 354
|
import requests
from bs4 import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
    """
    Scrape the Worldometer coronavirus page and return a dict mapping each headline
    statistic to its reported value.
    """
__lowerCamelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
__lowerCamelCase = soup.findAll("""h1""" )
__lowerCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(A__ , A__ )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
| 0
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCAmelCase_ = logging.getLogger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = 'summarization'
UpperCAmelCase__ : List[str] = ['loss']
UpperCAmelCase__ : Union[str, Any] = ROUGE_KEYS
UpperCAmelCase__ : List[str] = 'rouge2'
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[int] , **UpperCamelCase_: Any ):
if hparams.sortish_sampler and hparams.gpus > 1:
__lowerCamelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(UpperCamelCase_ , num_labels=UpperCamelCase_ , mode=self.mode , **UpperCamelCase_ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
__lowerCamelCase = Path(self.output_dir ) / """metrics.json"""
__lowerCamelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
__lowerCamelCase = 0
__lowerCamelCase = defaultdict(UpperCamelCase_ )
__lowerCamelCase = self.config.model_type
__lowerCamelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__lowerCamelCase = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
__lowerCamelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__lowerCamelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__lowerCamelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__lowerCamelCase = get_git_info()["""repo_sha"""]
__lowerCamelCase = hparams.num_workers
__lowerCamelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__lowerCamelCase = self.decoder_start_token_id
__lowerCamelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__lowerCamelCase = False
__lowerCamelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__lowerCamelCase = self.hparams.eval_max_gen_length
else:
__lowerCamelCase = self.model.config.max_length
__lowerCamelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Dict[str, torch.Tensor] ):
__lowerCamelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase_ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
__lowerCamelCase = True
return readable_batch
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[str] , **UpperCamelCase_: List[str] ):
return self.model(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[int] ):
__lowerCamelCase = self.tokenizer.batch_decode(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return lmap(str.strip , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: dict ):
__lowerCamelCase = self.tokenizer.pad_token_id
__lowerCamelCase, __lowerCamelCase = batch["""input_ids"""], batch["""attention_mask"""]
__lowerCamelCase = batch["""labels"""]
if isinstance(self.model , UpperCamelCase_ ):
__lowerCamelCase = self.model._shift_right(UpperCamelCase_ )
else:
__lowerCamelCase = shift_tokens_right(UpperCamelCase_ , UpperCamelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__lowerCamelCase = decoder_input_ids
self.save_readable_batch(UpperCamelCase_ )
__lowerCamelCase = self(UpperCamelCase_ , attention_mask=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , use_cache=UpperCamelCase_ )
__lowerCamelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__lowerCamelCase = nn.CrossEntropyLoss(ignore_index=UpperCamelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
__lowerCamelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__lowerCamelCase = nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
__lowerCamelCase, __lowerCamelCase = label_smoothed_nll_loss(
UpperCamelCase_ , UpperCamelCase_ , self.hparams.label_smoothing , ignore_index=UpperCamelCase_ )
return (loss,)
@property
def lowerCAmelCase__ ( self: Any ):
return self.tokenizer.pad_token_id
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self._step(UpperCamelCase_ )
__lowerCamelCase = dict(zip(self.loss_names , UpperCamelCase_ ) )
# tokens per batch
__lowerCamelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
__lowerCamelCase = batch["""input_ids"""].shape[0]
__lowerCamelCase = batch["""input_ids"""].eq(self.pad ).sum()
__lowerCamelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: str ):
return self._generative_step(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any]="val" ):
self.step_count += 1
__lowerCamelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__lowerCamelCase = losses["""loss"""]
__lowerCamelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
__lowerCamelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__lowerCamelCase = torch.tensor(UpperCamelCase_ ).type_as(UpperCamelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase_ )
__lowerCamelCase = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
__lowerCamelCase = self.step_count
self.metrics[prefix].append(UpperCamelCase_ ) # callback writes this to self.metrics_save_path
__lowerCamelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] ):
return calculate_rouge(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: dict ):
__lowerCamelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__lowerCamelCase = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=UpperCamelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__lowerCamelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
__lowerCamelCase = self.ids_to_clean_text(UpperCamelCase_ )
__lowerCamelCase = self.ids_to_clean_text(batch["""labels"""] )
__lowerCamelCase = self._step(UpperCamelCase_ )
__lowerCamelCase = dict(zip(self.loss_names , UpperCamelCase_ ) )
__lowerCamelCase = self.calc_generative_metrics(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = np.mean(lmap(UpperCamelCase_ , UpperCamelCase_ ) )
base_metrics.update(gen_time=UpperCamelCase_ , gen_len=UpperCamelCase_ , preds=UpperCamelCase_ , target=UpperCamelCase_ , **UpperCamelCase_ )
return base_metrics
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Dict ):
return self._generative_step(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple ):
return self.validation_epoch_end(UpperCamelCase_ , prefix="""test""" )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str ):
__lowerCamelCase = self.n_obs[type_path]
__lowerCamelCase = self.target_lens[type_path]
__lowerCamelCase = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase_ , n_obs=UpperCamelCase_ , max_target_length=UpperCamelCase_ , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: bool = False ):
__lowerCamelCase = self.get_dataset(UpperCamelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__lowerCamelCase = dataset.make_sortish_sampler(UpperCamelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase_ , batch_size=UpperCamelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase_ , num_workers=self.num_workers , sampler=UpperCamelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__lowerCamelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase_ , batch_sampler=UpperCamelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase_ , batch_size=UpperCamelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase_ , num_workers=self.num_workers , sampler=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase_ )
return dataloader
def lowerCAmelCase__ ( self: Union[str, Any] ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase__ ( self: Any ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Any , UpperCamelCase_: int ):
BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ )
add_generic_args(UpperCamelCase_ , UpperCamelCase_ )
parser.add_argument(
"""--max_source_length""" , default=10_24 , type=UpperCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=UpperCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_42 , type=UpperCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_42 , type=UpperCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=UpperCamelCase_ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=UpperCamelCase_ )
parser.add_argument("""--max_tokens_per_batch""" , type=UpperCamelCase_ , default=UpperCamelCase_ )
parser.add_argument("""--logger_name""" , type=UpperCamelCase_ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=UpperCamelCase_ , default=-1 , required=UpperCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=UpperCamelCase_ , default=5_00 , required=UpperCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=UpperCamelCase_ , default=-1 , required=UpperCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=UpperCamelCase_ , default="""summarization""" , required=UpperCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=UpperCamelCase_ , default=0.0 , required=UpperCamelCase_ )
parser.add_argument("""--src_lang""" , type=UpperCamelCase_ , default="""""" , required=UpperCamelCase_ )
parser.add_argument("""--tgt_lang""" , type=UpperCamelCase_ , default="""""" , required=UpperCamelCase_ )
parser.add_argument("""--eval_beams""" , type=UpperCamelCase_ , default=UpperCamelCase_ , required=UpperCamelCase_ )
parser.add_argument(
"""--val_metric""" , type=UpperCamelCase_ , default=UpperCamelCase_ , required=UpperCamelCase_ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=UpperCamelCase_ , default=1 , required=UpperCamelCase_ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=UpperCamelCase_ , default=-1 , required=UpperCamelCase_ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[int] = 'translation'
UpperCAmelCase__ : Any = ['loss']
UpperCAmelCase__ : List[str] = ['bleu']
UpperCAmelCase__ : List[str] = 'bleu'
def __init__( self: Tuple , UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Dict ):
super().__init__(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = hparams.src_lang
__lowerCamelCase = hparams.tgt_lang
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Any ):
return calculate_bleu(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any]=None ):
Path(args.output_dir ).mkdir(exist_ok=A__ )
check_output_dir(A__ , expected_items=3 )
if model is None:
if "summarization" in args.task:
__lowerCamelCase = SummarizationModule(A__ )
else:
__lowerCamelCase = TranslationModule(A__ )
__lowerCamelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
__lowerCamelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__lowerCamelCase = os.environ.get("""WANDB_PROJECT""" , A__ )
__lowerCamelCase = WandbLogger(name=model.output_dir.name , project=A__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__lowerCamelCase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
__lowerCamelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__lowerCamelCase = False
__lowerCamelCase = args.val_metric == """loss"""
__lowerCamelCase = generic_train(
A__ , A__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , A__ ) , early_stopping_callback=A__ , logger=A__ , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
__lowerCamelCase = """"""
__lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=A__ ) )
if checkpoints:
__lowerCamelCase = checkpoints[-1]
__lowerCamelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
UpperCAmelCase_ = pl.Trainer.add_argparse_args(parser)
UpperCAmelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase_ = parser.parse_args()
main(args)
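# Hedged usage note (added). This is the seq2seq fine-tuning entry point from the transformers
# research examples. Assuming the generic arguments contributed by add_generic_args() and
# BaseTransformer (not shown in this file), a minimal summarization run looks roughly like:
#
#     python finetune.py \
#         --data_dir ./cnn_dm \
#         --output_dir ./outputs \
#         --model_name_or_path t5-small \
#         --learning_rate 3e-5 \
#         --train_batch_size 8 --eval_batch_size 8 \
#         --do_train --do_predict \
#         --gpus 1
#
# Only the flags defined in add_model_specific_args() above are guaranteed by this file; the
# remaining flag names are assumptions about the shared example utilities.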
| 355
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
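# Hedged example (added; not part of the original file). The two classes above appear to be
# YolosConfig and its ONNX export config from transformers. Assuming that correspondence,
# building a model from a config with the defaults above looks like the commented sketch
# below (upstream class names, not the obfuscated ones used in this file):
#
#     from transformers import YolosConfig, YolosForObjectDetection
#
#     config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#     model = YolosForObjectDetection(config)  # randomly initialised weights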
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 356
|
import os
from math import log10
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
    """
    Project Euler 99: return the 1-based line number of the base,exponent pair in the given
    file whose base**exponent is largest, comparing exponent * log10(base) rather than the
    huge powers themselves.
    """
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowerCamelCase, __lowerCamelCase = list(map(A__ , line.split(""",""" ) ) )
        if x * log10(A__ ) > largest:
            __lowerCamelCase = x * log10(A__ )
__lowerCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
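# Added note: each line of base_exp.txt holds a "base,exponent" pair, and the solution ranks
# the pairs by exponent * log10(base) instead of evaluating the enormous powers directly.
# For example, a line "2,10" would get the key 10 * log10(2) ~= 3.0103, i.e. log10(1024).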
| 29
| 0
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = 'Hello, World!'
UpperCAmelCase_ = 'en_XX'
def lowerCamelCase__ ( A__ : str , A__ : str , A__ : bool ):
    """
    Copy the weights of a fairseq X-MOD checkpoint into a Hugging Face Xmod model (masked-LM
    head, or an MNLI classification head when classification_head=True), verify that both
    models produce matching outputs, and save the result to pytorch_dump_folder_path.
    """
__lowerCamelCase = Path("""data_bin""" )
__lowerCamelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(A__ ).parent ) , checkpoint_file=Path(A__ ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(A__ ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(A__ ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(A__ )
__lowerCamelCase = xmod.model.encoder.sentence_encoder
__lowerCamelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowerCamelCase = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , A__ )
__lowerCamelCase = XmodForSequenceClassification(A__ ) if classification_head else XmodForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCamelCase = xmod_sent_encoder.embed_tokens.weight
__lowerCamelCase = xmod_sent_encoder.embed_positions.weight
__lowerCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowerCamelCase = xmod_sent_encoder.layernorm_embedding.weight
__lowerCamelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCamelCase = model.roberta.encoder.layer[i]
__lowerCamelCase = xmod_sent_encoder.layers[i]
# self attention
__lowerCamelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
__lowerCamelCase = xmod_layer.self_attn.q_proj.weight
__lowerCamelCase = xmod_layer.self_attn.q_proj.bias
__lowerCamelCase = xmod_layer.self_attn.k_proj.weight
__lowerCamelCase = xmod_layer.self_attn.k_proj.bias
__lowerCamelCase = xmod_layer.self_attn.v_proj.weight
__lowerCamelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowerCamelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
__lowerCamelCase = xmod_layer.self_attn.out_proj.weight
__lowerCamelCase = xmod_layer.self_attn.out_proj.bias
__lowerCamelCase = xmod_layer.self_attn_layer_norm.weight
__lowerCamelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowerCamelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
__lowerCamelCase = xmod_layer.fca.weight
__lowerCamelCase = xmod_layer.fca.bias
# output
__lowerCamelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
__lowerCamelCase = xmod_layer.fca.weight
__lowerCamelCase = xmod_layer.fca.bias
__lowerCamelCase = xmod_layer.final_layer_norm.weight
__lowerCamelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowerCamelCase = xmod_layer.adapter_layer_norm.weight
__lowerCamelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowerCamelCase = bert_output.adapter_modules[lang_code]
__lowerCamelCase = xmod_layer.adapter_modules[lang_code]
__lowerCamelCase = from_adapter.fca.weight
__lowerCamelCase = from_adapter.fca.bias
__lowerCamelCase = from_adapter.fca.weight
__lowerCamelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowerCamelCase = xmod_sent_encoder.layer_norm.weight
__lowerCamelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowerCamelCase = xmod.model.classification_heads["""mnli"""].dense.weight
__lowerCamelCase = xmod.model.classification_heads["""mnli"""].dense.bias
__lowerCamelCase = xmod.model.classification_heads["""mnli"""].out_proj.weight
__lowerCamelCase = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__lowerCamelCase = xmod.model.encoder.lm_head.dense.weight
__lowerCamelCase = xmod.model.encoder.lm_head.dense.bias
__lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.weight
__lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.bias
__lowerCamelCase = xmod.model.encoder.lm_head.weight
__lowerCamelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCamelCase = xmod.encode(A__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(A__ )
__lowerCamelCase = model(A__ )[0]
if classification_head:
__lowerCamelCase = xmod.model.classification_heads["""mnli"""](xmod.extract_features(A__ ) )
else:
__lowerCamelCase = xmod.model(A__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
__lowerCamelCase = torch.allclose(A__ , A__ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase_ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
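# Hedged usage note (added). Running the conversion typically looks like the following; the
# script filename is the upstream one and an assumption here, while the three flags are the
# ones defined by the argparse setup above:
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/xmod/model.pt \
#         --pytorch_dump_folder_path ./xmod-base-converted
#
# Add --classification_head to also convert the MNLI classification head instead of the
# masked-LM head.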
| 357
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
    """
    Create a beta schedule that discretises the given alpha-bar function (the cumulative
    product of 1 - beta over time), using either the cosine or the exp transform, and return
    the betas as a float32 tensor with each value clipped to the supplied maximum.
    """
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
    return torch.tensor(A__ , dtype=torch.float32 )
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
            __lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.float32 )
elif beta_schedule == "linear":
            __lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
        __lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
            __lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.float32 )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
            __lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            __lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
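# Hedged usage sketch (added; not part of the original module). This scheduler appears to be
# diffusers' KDPM2DiscreteScheduler. Assuming that correspondence, a denoising loop drives it
# like the commented sketch below; `unet` stands in for any noise-prediction model and is an
# assumption, not something defined in this file.
#
#     from diffusers import KDPM2DiscreteScheduler
#
#     scheduler = KDPM2DiscreteScheduler(beta_schedule="linear")
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample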
| 29
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : "DiagonalGaussianDistribution"
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = True
@register_to_config
def __init__( self: Union[str, Any] , UpperCamelCase_: int = 3 , UpperCamelCase_: int = 3 , UpperCamelCase_: Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase_: Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase_: Tuple[int] = (64,) , UpperCamelCase_: int = 1 , UpperCamelCase_: str = "silu" , UpperCamelCase_: int = 4 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__lowerCamelCase = Encoder(
in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , down_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , act_fn=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , double_z=UpperCamelCase_ , )
# pass init params to Decoder
__lowerCamelCase = Decoder(
in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , up_block_types=UpperCamelCase_ , block_out_channels=UpperCamelCase_ , layers_per_block=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , act_fn=UpperCamelCase_ , )
        __lowerCamelCase = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        __lowerCamelCase = nn.Conv2d(UpperCamelCase_ , UpperCamelCase_ , 1 )
__lowerCamelCase = False
__lowerCamelCase = False
# only relevant if vae tiling is enabled
__lowerCamelCase = self.config.sample_size
__lowerCamelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowerCamelCase = 0.25
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Any=False ):
if isinstance(UpperCamelCase_ , (Encoder, Decoder) ):
__lowerCamelCase = value
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: bool = True ):
__lowerCamelCase = use_tiling
def lowerCAmelCase__ ( self: Optional[int] ):
self.enable_tiling(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = True
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = {}
def fn_recursive_add_processors(UpperCamelCase_: str , UpperCamelCase_: torch.nn.Module , UpperCamelCase_: Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase_ , """set_processor""" ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , UpperCamelCase_ , UpperCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return processors
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(UpperCamelCase_ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(UpperCamelCase_: str , UpperCamelCase_: torch.nn.Module , UpperCamelCase_: int ):
if hasattr(UpperCamelCase_ , """set_processor""" ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
module.set_processor(UpperCamelCase_ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , UpperCamelCase_ , UpperCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase_ , return_dict=UpperCamelCase_ )
if self.use_slicing and x.shape[0] > 1:
__lowerCamelCase = [self.encoder(UpperCamelCase_ ) for x_slice in x.split(1 )]
__lowerCamelCase = torch.cat(UpperCamelCase_ )
else:
__lowerCamelCase = self.encoder(UpperCamelCase_ )
__lowerCamelCase = self.quant_conv(UpperCamelCase_ )
__lowerCamelCase = DiagonalGaussianDistribution(UpperCamelCase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase_ , return_dict=UpperCamelCase_ )
__lowerCamelCase = self.post_quant_conv(UpperCamelCase_ )
__lowerCamelCase = self.decoder(UpperCamelCase_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase_ )
@apply_forward_hook
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True ):
if self.use_slicing and z.shape[0] > 1:
__lowerCamelCase = [self._decode(UpperCamelCase_ ).sample for z_slice in z.split(1 )]
__lowerCamelCase = torch.cat(UpperCamelCase_ )
else:
__lowerCamelCase = self._decode(UpperCamelCase_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] ):
__lowerCamelCase = min(a.shape[2] , b.shape[2] , UpperCamelCase_ )
for y in range(UpperCamelCase_ ):
__lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = min(a.shape[3] , b.shape[3] , UpperCamelCase_ )
for x in range(UpperCamelCase_ ):
__lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True ):
__lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowerCamelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowerCamelCase = []
for i in range(0 , x.shape[2] , UpperCamelCase_ ):
__lowerCamelCase = []
for j in range(0 , x.shape[3] , UpperCamelCase_ ):
__lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowerCamelCase = self.encoder(UpperCamelCase_ )
__lowerCamelCase = self.quant_conv(UpperCamelCase_ )
row.append(UpperCamelCase_ )
rows.append(UpperCamelCase_ )
__lowerCamelCase = []
for i, row in enumerate(UpperCamelCase_ ):
__lowerCamelCase = []
for j, tile in enumerate(UpperCamelCase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCamelCase = self.blend_v(rows[i - 1][j] , UpperCamelCase_ , UpperCamelCase_ )
if j > 0:
__lowerCamelCase = self.blend_h(row[j - 1] , UpperCamelCase_ , UpperCamelCase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase_ , dim=3 ) )
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=2 )
__lowerCamelCase = DiagonalGaussianDistribution(UpperCamelCase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True ):
__lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowerCamelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowerCamelCase = []
for i in range(0 , z.shape[2] , UpperCamelCase_ ):
__lowerCamelCase = []
for j in range(0 , z.shape[3] , UpperCamelCase_ ):
__lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowerCamelCase = self.post_quant_conv(UpperCamelCase_ )
__lowerCamelCase = self.decoder(UpperCamelCase_ )
row.append(UpperCamelCase_ )
rows.append(UpperCamelCase_ )
__lowerCamelCase = []
for i, row in enumerate(UpperCamelCase_ ):
__lowerCamelCase = []
for j, tile in enumerate(UpperCamelCase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCamelCase = self.blend_v(rows[i - 1][j] , UpperCamelCase_ , UpperCamelCase_ )
if j > 0:
__lowerCamelCase = self.blend_h(row[j - 1] , UpperCamelCase_ , UpperCamelCase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase_ , dim=3 ) )
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[torch.Generator] = None , ):
__lowerCamelCase = sample
__lowerCamelCase = self.encode(UpperCamelCase_ ).latent_dist
if sample_posterior:
__lowerCamelCase = posterior.sample(generator=UpperCamelCase_ )
else:
__lowerCamelCase = posterior.mode()
__lowerCamelCase = self.decode(UpperCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase_ )
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = IFImgaImgSuperResolutionPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: Optional[int] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = [False] * len(A__ )
__lowerCamelCase = [-1] * len(A__ )
def dfs(A__ : Optional[int] , A__ : Optional[int] ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
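# A minimal, self-contained sketch of the same two-coloring check with readable names.
# It is independent of the snippet above; the graph passed at the end is illustrative only.
def is_bipartite(adj):
    color = {}
    for start in adj:
        if start in color:
            continue
        color[start] = 0
        stack = [start]
        while stack:
            node = stack.pop()
            for neighbour in adj[node]:
                if neighbour not in color:
                    # give the neighbour the opposite color and keep exploring
                    color[neighbour] = 1 - color[node]
                    stack.append(neighbour)
                elif color[neighbour] == color[node]:
                    # two adjacent vertices share a color -> not bipartite
                    return False
    return True

print(is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True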
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase_ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase_ = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
    # not used in modeling files, but it's important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def lowerCamelCase__ ( A__ : Tuple , A__ : List[Any] , A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'config.{attribute}' in modeling_source
or f'getattr(config, "{attribute}"' in modeling_source
or f'getattr(self.config, "{attribute}"' in modeling_source
):
__lowerCamelCase = True
# Deal with multi-line cases
elif (
re.search(
Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , A__ , )
is not None
):
__lowerCamelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__lowerCamelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__lowerCamelCase = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__lowerCamelCase = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
__lowerCamelCase = True
if not attribute_used:
__lowerCamelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__lowerCamelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__lowerCamelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__lowerCamelCase = True
elif attribute.endswith("""_token_id""" ):
__lowerCamelCase = True
# configuration class specific cases
if not case_allowed:
__lowerCamelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__lowerCamelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
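# Illustrative helper (defined only as a sketch; it is not called anywhere in this script):
# it shows why both the plain substring test and the multi-line regex above are needed.
# The attribute name "hidden_size" is just an example.
def _demo_multiline_getattr_detection():
    example_source = 'value = getattr(\n    self.config,\n    "hidden_size",\n    None,\n)'
    # the single-line check misses this use because `config.hidden_size` never appears verbatim
    single_line_hit = "config.hidden_size" in example_source  # False
    # the regex tolerates whitespace/newlines between `getattr(`, `config,` and the attribute name
    multi_line_hit = (
        re.search(
            r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"',
            example_source,
        )
        is not None
    )  # True
    return single_line_hit, multi_line_hit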
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dict(inspect.signature(config_class.__init__ ).parameters )
__lowerCamelCase = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
__lowerCamelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__lowerCamelCase = {}
if len(config_class.attribute_map ) > 0:
__lowerCamelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__lowerCamelCase = inspect.getsourcefile(A__ )
__lowerCamelCase = os.path.dirname(A__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__lowerCamelCase = [os.path.join(A__ , A__ ) for fn in os.listdir(A__ ) if fn.startswith("""modeling_""" )]
# Get the source code strings
__lowerCamelCase = []
for path in modeling_paths:
if os.path.isfile(A__ ):
with open(A__ ) as fp:
modeling_sources.append(fp.read() )
__lowerCamelCase = []
for config_param, default_value in zip(A__ , A__ ):
# `attributes` here is all the variant names for `config_param`
__lowerCamelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A__ , A__ , A__ , A__ ):
unused_attributes.append(attributes[0] )
return sorted(A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__lowerCamelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda A__ : inspect.isclass(A__ )
and issubclass(A__ , A__ )
and inspect.getmodule(A__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__lowerCamelCase = check_config_attributes_being_used(A__ )
if len(A__ ) > 0:
__lowerCamelCase = unused_attributes
if len(A__ ) > 0:
__lowerCamelCase = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'{name}: {attributes}\n'
raise ValueError(A__ )
if __name__ == "__main__":
check_config_attributes()
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: dict[str, list[str]] , UpperCamelCase_: str ):
__lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase = {}
__lowerCamelCase = source_vertex
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = {self.source_vertex}
__lowerCamelCase = None
__lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
__lowerCamelCase = vertex
queue.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
__lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
__lowerCamelCase = n - k
# Calculate C(n,k)
for i in range(A__ ):
result *= n - i
result //= i + 1
return result
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , A__ ) // (node_count + 1)
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
__lowerCamelCase = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return catalan_number(A__ ) * factorial(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
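# A small worked check, separate from the snippet above (whose helpers were renamed):
# with 3 nodes there are C(6, 3) // 4 = 5 binary search trees (the 3rd Catalan number)
# and 5 * 3! = 30 labeled binary trees.
from math import comb, factorial

nodes = 3
catalan = comb(2 * nodes, nodes) // (nodes + 1)  # 5
print(catalan)                                   # 5 binary search trees
print(catalan * factorial(nodes))                # 30 binary trees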
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda __lowerCamelCase: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
                F'determinant modulo {req_l} of encryption key({det}) '
                F'is not coprime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
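# A single-block illustration, independent of the class above; the key matrix and the
# two-letter message are made up, and the 36-character alphabet matches key_string above.
import numpy as np

alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
key = np.array([[2, 5], [1, 6]])                        # det = 7, coprime with 36
block = np.array([alphabet.index(ch) for ch in "HI"])   # H -> 7, I -> 8
cipher = key.dot(block) % 36                            # [54 % 36, 55 % 36] = [18, 19]
print("".join(alphabet[int(v)] for v in cipher))        # -> "ST"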
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = {}
__lowerCamelCase = tokenizer(example["""content"""] , truncation=A__ )["""input_ids"""]
__lowerCamelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase_ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase_ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ = multiprocessing.cpu_count()
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = load_dataset(args.dataset_name, split='train')
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
UpperCAmelCase_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
__lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
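# Typical invocation sketch (not part of this file). The checkpoint name is only an
# example; any OWL-ViT-style zero-shot object-detection checkpoint should work the same way.
from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# Each prediction is a dict such as
# {"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
print(predictions)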
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
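# downscale_height_and_width (defined below as `lowerCamelCase__`) shrinks a (height, width)
# pair by `scale_factor`, rounding up so the result stays a multiple of `scale_factor`
# (e.g. 768 -> 96 with the default scale_factor of 8).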
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Union[str, Any] , A__ : Dict=8 ):
'''simple docstring'''
__lowerCamelCase : str = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowerCamelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str=512 , A__ : Tuple=512 ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("""RGB""" ) )
__lowerCamelCase : int = arr.astype(np.floataa ) / 127.5 - 1
__lowerCamelCase : Union[str, Any] = np.transpose(A__ , [2, 0, 1] )
__lowerCamelCase : Optional[Any] = torch.from_numpy(A__ ).unsqueeze(0 )
return image
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Optional[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ):
# get the original timestep using init_timestep
__lowerCamelCase : str = min(int(num_inference_steps * strength ) , UpperCamelCase_ )
__lowerCamelCase : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
__lowerCamelCase : Any = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any=None ):
if not isinstance(UpperCamelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase_ )}' )
__lowerCamelCase : Tuple = image.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
__lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__lowerCamelCase : Tuple = image
else:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase_ )
]
__lowerCamelCase : str = torch.cat(UpperCamelCase_ , dim=0 )
else:
__lowerCamelCase : Dict = self.movq.encode(UpperCamelCase_ ).latent_dist.sample(UpperCamelCase_ )
__lowerCamelCase : List[Any] = self.movq.config.scaling_factor * init_latents
__lowerCamelCase : Optional[Any] = torch.cat([init_latents] , dim=0 )
__lowerCamelCase : int = init_latents.shape
__lowerCamelCase : List[str] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
# get latents
__lowerCamelCase : str = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase : Optional[int] = init_latents
return latents
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase : Dict = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase : Optional[int] = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase : Optional[Any] = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: List[Any] ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: float = 0.3 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase : Dict = self._execution_device
__lowerCamelCase : Any = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase : int = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase : List[Any] = image_embeds.shape[0]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase : str = torch.cat(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
__lowerCamelCase : Optional[Any] = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase : Optional[int] = [image]
if not all(isinstance(UpperCamelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'Input is in incorrect format: {[type(UpperCamelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
__lowerCamelCase : int = torch.cat([prepare_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for i in image] , dim=0 )
__lowerCamelCase : str = image.to(dtype=image_embeds.dtype , device=UpperCamelCase_ )
__lowerCamelCase : Any = self.movq.encode(UpperCamelCase_ )["""latents"""]
__lowerCamelCase : Union[str, Any] = latents.repeat_interleave(UpperCamelCase_ , dim=0 )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase : Optional[int] = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase : List[str] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__lowerCamelCase, __lowerCamelCase : int = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
__lowerCamelCase : int = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase : Optional[Any] = {"""image_embeds""": image_embeds}
__lowerCamelCase : Optional[int] = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase : List[Any] = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase : str = variance_pred.chunk(2 )
__lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : List[Any] = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase : Tuple = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase : Tuple = image * 0.5 + 0.5
__lowerCamelCase : Optional[Any] = image.clamp(0 , 1 )
__lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase : List[str] = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir('fixtures')
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase = mock.Mock()
__lowerCamelCase = 5_00
__lowerCamelCase = {}
__lowerCamelCase = HTTPError
__lowerCamelCase = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase_ ) as mock_head:
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self: List[str] ):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: List[str] ):
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id="""test-feature-extractor""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Dict ):
CustomFeatureExtractor.register_for_auto_class()
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=UpperCamelCase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 0
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[Any] = 'data2vec-audio'
def __init__( self: Any , UpperCamelCase_: Tuple=32 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Any=12 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: List[str]=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Any=1E-5 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_: int=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_: str=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: str=16 , UpperCamelCase_: int=19 , UpperCamelCase_: str=5 , UpperCamelCase_: Any=0.05 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Optional[Any]=10 , UpperCamelCase_: Optional[Any]=0 , UpperCamelCase_: str="sum" , UpperCamelCase_: Dict=False , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: Union[str, Any]=2_56 , UpperCamelCase_: int=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCamelCase_: List[str]=(5, 3, 3, 1, 1) , UpperCamelCase_: Union[str, Any]=(1, 2, 3, 1, 1) , UpperCamelCase_: Any=5_12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[str]=1 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: str=False , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: str=3 , UpperCamelCase_: Tuple=None , **UpperCamelCase_: List[str] , ):
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = conv_pos_kernel_size
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
__lowerCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# adapter
__lowerCamelCase = add_adapter
__lowerCamelCase = adapter_kernel_size
__lowerCamelCase = adapter_stride
__lowerCamelCase = num_adapter_layers
__lowerCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = xvector_output_dim
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return math.prod(self.conv_stride )
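# Added note (illustrative, not part of the original config): the property above is
# meant to give the encoder's input-to-output ratio as the product of the conv
# strides. With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) that is
# 5 * 2**6 = 320, i.e. one feature frame per 320 samples (about 50 frames/s at 16 kHz).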
| 366
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
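# Added note (illustrative): read with the in-place swaps restored, the routine above
# is a bidirectional bubble (cocktail shaker) sort: each outer pass carries a smallest
# value towards the front and a largest value towards the back, e.g.
#     [4, 5, 2, 1, 2] -> [1, 2, 2, 4, 5]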
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
def lowerCamelCase__ ( A__ : List[Any] , A__ : Any ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
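# Added note (illustrative): the helper above is meant to return the *squared*
# Euclidean distance between its two points (e.g. 3**2 + 4**2 = 25 for (0, 0) and
# (3, 4)); the square root is applied only once, on the final result of the
# top-level closest-pair function further below.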
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[Any]=0 ):
'''simple docstring'''
    return sorted(A__ , key=lambda x : x[column] )
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : List[Any] , A__ : Optional[int]=float('inf' ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , A__ ):
__lowerCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__lowerCamelCase = current_dis
return min_dis
def lowerCamelCase__ ( A__ : Tuple , A__ : List[Any] , A__ : int=float('inf' ) ):
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , A__ ):
for j in range(max(0 , i - 6 ) , A__ ):
__lowerCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__lowerCamelCase = current_dis
return min_dis
def lowerCamelCase__ ( A__ : Optional[int] , A__ : List[str] , A__ : List[str] ):
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(A__ , A__ )
# recursion
__lowerCamelCase = points_counts // 2
__lowerCamelCase = closest_pair_of_points_sqr(
A__ , points_sorted_on_y[:mid] , A__ )
__lowerCamelCase = closest_pair_of_points_sqr(
A__ , points_sorted_on_y[mid:] , points_counts - mid )
__lowerCamelCase = min(A__ , A__ )
__lowerCamelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(A__ )
__lowerCamelCase = dis_between_closest_in_strip(
A__ , len(A__ ) , A__ )
return min(A__ , A__ )
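# Added note (illustrative): the function above is the divide-and-conquer step -- it
# solves both halves, keeps the smaller squared distance, then gathers the points
# whose x-coordinate lies within `closest_pair_dis` of the dividing point and
# rechecks only that cross strip, where each point is compared against a small
# window of neighbours (a 6-wide window in dis_between_closest_in_strip above).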
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Any ):
'''simple docstring'''
__lowerCamelCase = column_based_sort(A__ , column=0 )
__lowerCamelCase = column_based_sort(A__ , column=1 )
return (
closest_pair_of_points_sqr(
A__ , A__ , A__ )
) ** 0.5
if __name__ == "__main__":
UpperCAmelCase_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 367
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
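# Added note (illustrative): the helper above reads a yes/no style environment
# variable via strtobool, so e.g. exporting RUN_SLOW=yes (or 1/true) makes the flag
# below truthy and un-skips the tests wrapped with the `slow` decorator defined
# further down.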
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( A__ : str , A__ : Dict , A__ : int , A__ : int , A__ : Optional[Any]=True , A__ : Union[str, Any]="pt" ):
'''simple docstring'''
__lowerCamelCase = {"""add_prefix_space""": True} if isinstance(A__ , A__ ) and not line.startswith(""" """ ) else {}
__lowerCamelCase = padding_side
return tokenizer(
[line] , max_length=A__ , padding="""max_length""" if pad_to_max_length else None , truncation=A__ , return_tensors=A__ , add_special_tokens=A__ , **A__ , )
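# Added usage sketch (hypothetical call, not from the original file): encode_line
# tokenizes a single line with fixed-length padding/truncation, e.g.
#     encode_line(tokenizer, "Hello world", 32, "right")
# would return a dict-like encoding holding 1 x 32 "input_ids" and "attention_mask"
# tensors, padded on the right.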
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : int , A__ : Union[str, Any]=None , ):
'''simple docstring'''
__lowerCamelCase = input_ids.ne(A__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
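# Added note (illustrative worked example): trim_batch drops columns made up entirely
# of the pad token. For pad_token_id = 0 and input_ids [[5, 6, 0, 0], [7, 0, 0, 0]]
# the column mask is [True, True, False, False], so the batch shrinks to
# [[5, 6], [7, 0]] (an attention_mask, if given, is cut the same way).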
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any="train" , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[int]="" , ):
super().__init__()
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(type_path + """.source""" )
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(type_path + """.target""" )
__lowerCamelCase = self.get_char_lens(self.src_file )
__lowerCamelCase = max_source_length
__lowerCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
__lowerCamelCase = tokenizer
__lowerCamelCase = prefix
if n_obs is not None:
__lowerCamelCase = self.src_lens[:n_obs]
__lowerCamelCase = src_lang
__lowerCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
return len(self.src_lens )
def __getitem__( self: int , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = index + 1 # linecache starts at 1
__lowerCamelCase = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase_ ).rstrip("""\n""" )
__lowerCamelCase = linecache.getline(str(self.tgt_file ) , UpperCamelCase_ ).rstrip("""\n""" )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCamelCase_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__lowerCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase_ ) else self.tokenizer
)
__lowerCamelCase = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase_ ) else self.tokenizer
__lowerCamelCase = encode_line(UpperCamelCase_ , UpperCamelCase_ , self.max_source_length , """right""" )
__lowerCamelCase = encode_line(UpperCamelCase_ , UpperCamelCase_ , self.max_target_length , """right""" )
__lowerCamelCase = source_inputs["""input_ids"""].squeeze()
__lowerCamelCase = target_inputs["""input_ids"""].squeeze()
__lowerCamelCase = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[str, Any] ):
return [len(UpperCamelCase_ ) for x in Path(UpperCamelCase_ ).open().readlines()]
def lowerCAmelCase__ ( self: int , UpperCamelCase_: int ):
__lowerCamelCase = torch.stack([x["""input_ids"""] for x in batch] )
__lowerCamelCase = torch.stack([x["""attention_mask"""] for x in batch] )
__lowerCamelCase = torch.stack([x["""decoder_input_ids"""] for x in batch] )
__lowerCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase_ )
else self.tokenizer.pad_token_id
)
__lowerCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase_ )
else self.tokenizer.pad_token_id
)
__lowerCamelCase = trim_batch(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = trim_batch(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ )
__lowerCamelCase = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
UpperCAmelCase_ = getLogger(__name__)
def lowerCamelCase__ ( A__ : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(A__ ) )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = get_git_info()
save_json(A__ , os.path.join(A__ , """git_log.json""" ) )
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int] , A__ : List[Any]=4 , **A__ : Any ):
'''simple docstring'''
with open(A__ , """w""" ) as f:
json.dump(A__ , A__ , indent=A__ , **A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
with open(A__ ) as f:
return json.load(A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = git.Repo(search_parent_directories=A__ )
__lowerCamelCase = {
"""repo_id""": str(A__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( A__ : Callable , A__ : Iterable ):
'''simple docstring'''
return list(map(A__ , A__ ) )
def lowerCamelCase__ ( A__ : int , A__ : List[Any] ):
'''simple docstring'''
with open(A__ , """wb""" ) as f:
return pickle.dump(A__ , A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
def remove_articles(A__ : List[Any] ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , A__ )
def white_space_fix(A__ : Optional[int] ):
return " ".join(text.split() )
def remove_punc(A__ : str ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A__ : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) )
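# Added note (illustrative): normalization is meant to lower-case, strip punctuation
# and the articles a/an/the, and collapse whitespace, e.g.
#     normalize_answer("The Cat, sat!") -> "cat sat"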
def lowerCamelCase__ ( A__ : int , A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = normalize_answer(A__ ).split()
__lowerCamelCase = normalize_answer(A__ ).split()
__lowerCamelCase = Counter(A__ ) & Counter(A__ )
__lowerCamelCase = sum(common.values() )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(A__ )
__lowerCamelCase = 1.0 * num_same / len(A__ )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
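# Added note (illustrative worked example of the intended F1): for the prediction
# "the cat sat" and the reference "cat sat down", the normalized token bags are
# [cat, sat] and [cat, sat, down]; num_same = 2, precision = 2/2, recall = 2/3,
# so F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.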
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[Any] ):
'''simple docstring'''
return normalize_answer(A__ ) == normalize_answer(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] ):
'''simple docstring'''
assert len(A__ ) == len(A__ )
__lowerCamelCase = 0
for hypo, pred in zip(A__ , A__ ):
em += exact_match_score(A__ , A__ )
if len(A__ ) > 0:
em /= len(A__ )
return {"em": em}
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def lowerCamelCase__ ( A__ : List[str] , A__ : Optional[int] , A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__lowerCamelCase = """dropout_rate"""
for p in extra_params:
if getattr(A__ , A__ , A__ ):
if not hasattr(A__ , A__ ) and not hasattr(A__ , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(A__ ) )
delattr(A__ , A__ )
continue
__lowerCamelCase = p if hasattr(A__ , A__ ) else equivalent_param[p]
setattr(A__ , A__ , getattr(A__ , A__ ) )
delattr(A__ , A__ )
return hparams, config
| 368
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = SpeechTaTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Dict = True
def lowerCAmelCase__ ( self: Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase_ )
__lowerCamelCase = AddedToken("""<mask>""" , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """this is a test"""
__lowerCamelCase = """this is a test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str]=False , UpperCamelCase_: Any=20 , UpperCamelCase_: Tuple=5 ):
__lowerCamelCase, __lowerCamelCase = self.get_input_output_texts(UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """<pad>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCamelCase_ ) , 81 )
def lowerCAmelCase__ ( self: str ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCamelCase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowerCamelCase = tokenizer.add_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_ ) )
__lowerCamelCase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowerCamelCase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowerCamelCase = tokenizer.add_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_ ) )
__lowerCamelCase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
def lowerCAmelCase__ ( self: str ):
pass
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCamelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
# fmt: off
self.assertListEqual(UpperCamelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase__ ( self: str ):
# Use custom sequence because this tokenizer does not handle numbers.
__lowerCamelCase = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
__lowerCamelCase = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCamelCase_ , )
| 369
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 370
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
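# Added note (illustrative): the recursive gcd above is used by check_determinant to
# require the key determinant to be coprime with 36 (26 letters + 10 digits), e.g.
# gcd(25, 36) == 1 is accepted while gcd(24, 36) == 12 is rejected.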
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda x: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
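    # Added note (illustrative): the method above builds the decryption key modulo
    # 36 -- det_inv satisfies (det * det_inv) % 36 == 1, and det * inv(K) is the
    # adjugate of the encryption key K, so the decryption key times the encryption
    # key reduces to the identity matrix mod 36.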
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = 'https://openaipublic.azureedge.net/jukebox/models/'
UpperCAmelCase_ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
__lowerCamelCase = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
__lowerCamelCase = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
__lowerCamelCase = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
__lowerCamelCase = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
__lowerCamelCase = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
__lowerCamelCase = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowerCamelCase = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
__lowerCamelCase = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
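# Hedged illustration (hypothetical key names, not taken from the original checkpoints):
# a few examples of what replace_key maps keys to, derived from the rules above.
#   a key with more than 10 dot-separated parts ending in ".model.1.bias"
#       -> same key with the suffix rewritten to ".conv1d_1.bias"
#   "foo.prime_state_ln"  -> "foo.encoder.final_layer_norm"
#   "vqvae.level_0.k"     -> "vqvae.level_0.codebook"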
def lowerCamelCase__ ( A__ : Dict , A__ : Dict , A__ : Optional[int] , A__ : List[str] ):
    '''Rename every key of an original Jukebox state dict (VQ-VAE, priors, conditioners) to match the transformers model, reporting keys whose name or shape cannot be matched.'''
__lowerCamelCase = {}
import re
__lowerCamelCase = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
__lowerCamelCase = re.compile(
R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
__lowerCamelCase = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
__lowerCamelCase = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
__lowerCamelCase = re.compile(
R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
__lowerCamelCase = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
__lowerCamelCase = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
__lowerCamelCase = re.compile(
R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
__lowerCamelCase = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A__ ):
__lowerCamelCase = re_encoder_block_conv_in.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] )
__lowerCamelCase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__lowerCamelCase = re_encoder_block_conv_in.sub(A__ , A__ )
elif re_encoder_block_resnet.fullmatch(A__ ):
__lowerCamelCase = re_encoder_block_resnet.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] )
__lowerCamelCase = {"""1""": 1, """3""": 2}[groups[-2]]
__lowerCamelCase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__lowerCamelCase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__lowerCamelCase = prefix + resnet_block
__lowerCamelCase = re_encoder_block_resnet.sub(A__ , A__ )
elif re_encoder_block_proj_out.fullmatch(A__ ):
__lowerCamelCase = re_encoder_block_proj_out.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__lowerCamelCase = re_encoder_block_proj_out.sub(A__ , A__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A__ ):
__lowerCamelCase = re_decoder_block_conv_out.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCamelCase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__lowerCamelCase = re_decoder_block_conv_out.sub(A__ , A__ )
elif re_decoder_block_resnet.fullmatch(A__ ):
__lowerCamelCase = re_decoder_block_resnet.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCamelCase = {"""1""": 1, """3""": 2}[groups[-2]]
__lowerCamelCase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__lowerCamelCase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__lowerCamelCase = prefix + resnet_block
__lowerCamelCase = re_decoder_block_resnet.sub(A__ , A__ )
elif re_decoder_block_proj_in.fullmatch(A__ ):
__lowerCamelCase = re_decoder_block_proj_in.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__lowerCamelCase = re_decoder_block_proj_in.sub(A__ , A__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A__ ):
__lowerCamelCase = re_prior_cond_conv_out.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCamelCase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__lowerCamelCase = re_prior_cond_conv_out.sub(A__ , A__ )
elif re_prior_cond_resnet.fullmatch(A__ ):
__lowerCamelCase = re_prior_cond_resnet.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCamelCase = {"""1""": 1, """3""": 2}[groups[-2]]
__lowerCamelCase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__lowerCamelCase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__lowerCamelCase = prefix + resnet_block
__lowerCamelCase = re_prior_cond_resnet.sub(A__ , A__ )
elif re_prior_cond_proj_in.fullmatch(A__ ):
__lowerCamelCase = re_prior_cond_proj_in.match(A__ )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__lowerCamelCase = re_prior_cond_proj_in.sub(A__ , A__ )
# keep original key
else:
__lowerCamelCase = original_key
__lowerCamelCase = replace_key(A__ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shapes
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__lowerCamelCase = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__lowerCamelCase = original_key
__lowerCamelCase = original_key
__lowerCamelCase = value
return new_dict
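# Hedged worked example (hypothetical key, for illustration only): the encoder
# conv-in pattern above would turn
#   "encoders.0.level_blocks.1.model.2.1.weight"
# into
#   "encoders.0.level_blocks.1.downsample_block.5.weight"
# since block_index = int("2") * 2 + int("1") = 5.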
@torch.no_grad()
def lowerCamelCase__ ( A__ : Tuple=None , A__ : List[Any]=None ):
    '''Download the original OpenAI Jukebox checkpoints, convert their keys, load them into a transformers JukeboxModel and save the converted model.'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__lowerCamelCase = requests.get(f'{PREFIX}{file}' , allow_redirects=A__ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=A__ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
__lowerCamelCase = MODEL_MAPPING[model_name.split("""/""" )[-1]]
__lowerCamelCase = JukeboxConfig.from_pretrained(A__ )
__lowerCamelCase = JukeboxModel(A__ )
__lowerCamelCase = []
__lowerCamelCase = {}
for i, dict_name in enumerate(A__ ):
__lowerCamelCase = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""]
__lowerCamelCase = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
__lowerCamelCase = old_dic[k]
elif k.endswith(""".w""" ):
__lowerCamelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowerCamelCase = old_dic[k]
else:
__lowerCamelCase = old_dic[k]
__lowerCamelCase = """vqvae""" if i == 0 else f'priors.{3 - i}'
__lowerCamelCase = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ )
weight_dict.append(A__ )
__lowerCamelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(A__ )
for i in range(len(A__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A__ ).mkdir(exist_ok=A__ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
json.dump(A__ , A__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
return weight_dict
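# Hedged reading of the index arithmetic above (derived from the code, not from any
# original documentation): checkpoint 0 is the VQ-VAE, while checkpoints 1, 2 and 3
# are converted under the prefixes priors.2, priors.1 and priors.0 respectively;
# weight_dict[2 - i] then hands model.priors[i] exactly the weights that were
# converted under the priors.{i} prefix.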
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
UpperCAmelCase_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 371
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
    '''Simulate a quantum half adder; the sum (XOR) is measured on classical bit 0 and the carry (AND) on classical bit 1.'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the Aer simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
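# Hedged note (not from the original source): the sum bit is the XOR of the inputs
# and the carry bit is the AND, so with the measurement layout above (sum on
# classical bit 0, carry on classical bit 1) the ideal simulator should report:
#   half_adder(0, 0) -> {'00': 1000}
#   half_adder(0, 1) -> {'01': 1000}
#   half_adder(1, 0) -> {'01': 1000}
#   half_adder(1, 1) -> {'10': 1000}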
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCamelCase__( unittest.TestCase):
def __init__( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: int=7 , UpperCamelCase_: Dict=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Tuple=30 , UpperCamelCase_: Optional[Any]=4_00 , UpperCamelCase_: Tuple=None , UpperCamelCase_: Dict=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Dict=None , ):
__lowerCamelCase = size if size is not None else {"""height""": 20, """width""": 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def lowerCAmelCase__ ( self: Dict ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
__lowerCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self: Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_convert_rgb""" ) )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCAmelCase__ ( self: Any ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase__ ( self: List[str] ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
__lowerCamelCase = """Hello"""
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase__ ( self: str ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase__ ( self: List[str] ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def lowerCAmelCase__ ( self: Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_convert_rgb""" ) )
def lowerCAmelCase__ ( self: int ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 350
|
def lowerCamelCase__ ( A__ : int ):
    '''Count the integer partitions of m using dynamic programming.'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
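# Hedged worked example (not part of the original module): the integer 4 has the
# five partitions 4, 3+1, 2+2, 2+1+1 and 1+1+1+1, so partition(4) is expected to
# return 5; tracing the recurrence above by hand gives the same value.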
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 0
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any]=13 , UpperCamelCase_: Optional[int]=7 , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: List[Any]=99 , UpperCamelCase_: Any=32 , UpperCamelCase_: int=5 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Union[str, Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Dict=50 , UpperCamelCase_: str=0.02 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: str=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = use_labels
__lowerCamelCase = scope
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase__ ( self: Tuple ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: Any ):
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = self.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Tuple , **UpperCamelCase_: Tuple , ):
__lowerCamelCase = BertGenerationEncoder(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple , **UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = True
__lowerCamelCase = BertGenerationEncoder(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , **UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = BertGenerationDecoder(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
# first forward pass
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
__lowerCamelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0]
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0]
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: str , UpperCamelCase_: List[Any] , *UpperCamelCase_: str , ):
__lowerCamelCase = BertGenerationDecoder(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
UpperCAmelCase__ : List[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = BertGenerationEncoderTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = """bert"""
self.model_tester.create_and_check_model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCamelCase = None
self.model_tester.create_and_check_model_as_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(UpperCamelCase_ )
@require_torch
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
__lowerCamelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCamelCase = model(UpperCamelCase_ )[0]
__lowerCamelCase = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@require_torch
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
__lowerCamelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCamelCase = model(UpperCamelCase_ )[0]
__lowerCamelCase = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 351
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
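    # Hedged worked example (not from the original docstring): with num_frames=8 and
    # frame_sampling_rate=4 the preprocessing above samples 8 frame indices evenly
    # spaced over the first 8 * 4 = 32 frames of the clip (np.linspace from 0 to 31).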
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase__ ( A__ : Features ):
    '''Choose a Parquet writer batch size that keeps row groups small for image, audio or binary features; return None to fall back to the default.'''
__lowerCamelCase = np.inf
def set_batch_size(A__ : FeatureType ) -> None:
nonlocal batch_size
if isinstance(A__ , A__ ):
__lowerCamelCase = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(A__ , A__ ):
__lowerCamelCase = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(A__ , A__ ) and feature.dtype == "binary":
__lowerCamelCase = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(A__ , A__ )
return None if batch_size is np.inf else batch_size
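# Hedged illustration (not part of the original module, and assuming the isinstance
# checks above test for Image, Audio and binary Value features, as the config
# constant names suggest): for a schema such as
#   Features({"image": Image(), "caption": Value("string")})
# the Image branch wins the min(), so Parquet row groups use the smaller
# PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS; a schema with only plain scalar
# columns leaves batch_size at np.inf and the function returns None, which lets the
# writer fall back to config.DEFAULT_MAX_BATCH_SIZE.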
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: int , UpperCamelCase_: NestedDataStructureLike[PathLike] , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
__lowerCamelCase = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
__lowerCamelCase = Parquet(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , hash=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Dict ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase__:
def __init__( self: Optional[int] , UpperCamelCase_: Dataset , UpperCamelCase_: Union[PathLike, BinaryIO] , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Any , ):
__lowerCamelCase = dataset
__lowerCamelCase = path_or_buf
__lowerCamelCase = batch_size or get_writer_batch_size(dataset.features )
__lowerCamelCase = parquet_writer_kwargs
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , """wb+""" ) as buffer:
__lowerCamelCase = self._write(file_obj=UpperCamelCase_ , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
else:
__lowerCamelCase = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
return written
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: BinaryIO , UpperCamelCase_: int , **UpperCamelCase_: List[Any] ):
__lowerCamelCase = 0
__lowerCamelCase = parquet_writer_kwargs.pop("""path_or_buf""" , UpperCamelCase_ )
__lowerCamelCase = self.dataset.features.arrow_schema
__lowerCamelCase = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ , **UpperCamelCase_ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase_ ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
__lowerCamelCase = query_table(
table=self.dataset._data , key=slice(UpperCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase_ )
written += batch.nbytes
writer.close()
return written
| 352
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[Any] = VideoToVideoSDPipeline
UpperCAmelCase__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {'image', 'width', 'height'}
UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {'image'}
UpperCAmelCase__ : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
UpperCAmelCase__ : Optional[int] = False
# No `output_type`.
UpperCAmelCase__ : Union[str, Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=0 ):
# 3 frames
__lowerCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = VideoToVideoSDPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = """np"""
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).frames
__lowerCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__lowerCamelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: str ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase_ , expected_max_diff=5E-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCAmelCase__ ( self: Tuple ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCAmelCase__ ( self: Tuple ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCAmelCase__ ( self: Tuple ):
pass
def lowerCAmelCase__ ( self: str ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__lowerCamelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCamelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=UpperCamelCase_ )
__lowerCamelCase = video.to("""cuda""" )
__lowerCamelCase = """Spiderman is surfing"""
__lowerCamelCase = pipe(UpperCamelCase_ , video=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=3 , output_type="""pt""" ).frames
__lowerCamelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 353
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
    '''Depth-first topological sort: every vertex is appended after its children, so the result is in reverse topological order.'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
    # if not all vertices have been visited, pick an unvisited one and continue from it
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
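# Hedged note (obtained by tracing the code above, not from the original source):
# for this graph the call returns ['c', 'd', 'e', 'b', 'a'] (children before their
# parents), so reversing the list gives a conventional topological order.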
| 29
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def lowerCamelCase__ ( A__ : np.ndarray ):
    '''Reshape a 1-D array into a column vector.'''
return input_array.reshape((input_array.size, 1) )
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int ):
    '''Compute the within-class covariance matrix of the dataset.'''
__lowerCamelCase = np.nan
for i in range(A__ ):
__lowerCamelCase = features[:, labels == i]
__lowerCamelCase = data.mean(1 )
# Centralize the data of class i
__lowerCamelCase = data - column_reshape(A__ )
if i > 0:
            # If covariance_sum has already been initialised (not the first class)
covariance_sum += np.dot(A__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowerCamelCase = np.dot(A__ , centered_data.T )
return covariance_sum / features.shape[1]
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int ):
    '''Compute the between-class covariance matrix of the dataset.'''
__lowerCamelCase = features.mean(1 )
__lowerCamelCase = np.nan
for i in range(A__ ):
__lowerCamelCase = features[:, labels == i]
__lowerCamelCase = data.shape[1]
__lowerCamelCase = data.mean(1 )
if i > 0:
            # If covariance_sum has already been initialised (not the first class)
covariance_sum += device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowerCamelCase = device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
return covariance_sum / features.shape[1]
def lowerCamelCase__ ( A__ : np.ndarray , A__ : int ):
    '''Project the dataset onto its first `dimensions` principal components.'''
if features.any():
__lowerCamelCase = features.mean(1 )
# Center the dataset
__lowerCamelCase = features - np.reshape(A__ , (data_mean.size, 1) )
__lowerCamelCase = np.dot(A__ , centered_data.T ) / features.shape[1]
__lowerCamelCase, __lowerCamelCase = np.linalg.eigh(A__ )
        # Take the columns in reverse order (-1), then keep only the first `dimensions` of them
__lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
__lowerCamelCase = np.dot(filtered_eigenvectors.T , A__ )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=A__ )
logging.error("""Dataset empty""" )
raise AssertionError
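# Hedged usage sketch (not part of the original test module): project a toy
# 3-feature, 5-sample dataset onto its two leading principal components.
#     features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
#                          [2.0, 3.0, 4.0, 5.0, 6.0],
#                          [3.0, 4.0, 5.0, 6.0, 7.0]])
#     projected = principal_component_analysis(features, 2)
#     projected.shape   # expected (2, 5): one row per retained component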
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int , A__ : int ):
    '''Project the dataset onto `dimensions` linear discriminant directions; requires classes > dimensions.'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
__lowerCamelCase, __lowerCamelCase = eigh(
covariance_between_classes(A__ , A__ , A__ ) , covariance_within_classes(A__ , A__ , A__ ) , )
__lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = np.linalg.svd(A__ )
__lowerCamelCase = svd_matrix[:, 0:dimensions]
__lowerCamelCase = np.dot(filtered_svd_matrix.T , A__ )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=A__ )
logging.error("""Dataset empty""" )
raise AssertionError
def lowerCamelCase__ ( ):
    '''The call must raise an AssertionError because dimensions (2) is not strictly smaller than classes (2).'''
__lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
__lowerCamelCase = np.array([0, 0, 0, 1, 1] )
__lowerCamelCase = 2
__lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(A__ ) as error_info:
__lowerCamelCase = linear_discriminant_analysis(
A__ , A__ , A__ , A__ )
if isinstance(A__ , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
__lowerCamelCase = 2
__lowerCamelCase = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(A__ ) as error_info:
__lowerCamelCase = principal_component_analysis(A__ , A__ )
if not np.allclose(A__ , A__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354
|
import requests
from bsa import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
    '''Scrape worldometers.info and return a mapping of COVID-19 statistic names to their current values.'''
__lowerCamelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
__lowerCamelCase = soup.findAll("""h1""" )
__lowerCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(A__ , A__ )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
| 0
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger()
def lowerCamelCase__ ( A__ : int , A__ : str , A__ : LevitConfig , A__ : Path , A__ : bool = True ):
print(f'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__lowerCamelCase = timm.create_model("""levit_128s""" , pretrained=A__ )
else:
__lowerCamelCase = timm.create_model("""levit_128""" , pretrained=A__ )
if hidden_sizes == 192:
__lowerCamelCase = timm.create_model("""levit_192""" , pretrained=A__ )
if hidden_sizes == 256:
__lowerCamelCase = timm.create_model("""levit_256""" , pretrained=A__ )
if hidden_sizes == 384:
__lowerCamelCase = timm.create_model("""levit_384""" , pretrained=A__ )
from_model.eval()
__lowerCamelCase = LevitForImageClassificationWithTeacher(A__ ).eval()
__lowerCamelCase = OrderedDict()
__lowerCamelCase = from_model.state_dict()
__lowerCamelCase = list(from_model.state_dict().keys() )
__lowerCamelCase = list(our_model.state_dict().keys() )
print(len(A__ ) , len(A__ ) )
for i in range(len(A__ ) ):
__lowerCamelCase = weights[og_keys[i]]
our_model.load_state_dict(A__ )
__lowerCamelCase = torch.randn((2, 3, 224, 224) )
__lowerCamelCase = from_model(A__ )
__lowerCamelCase = our_model(A__ ).logits
assert torch.allclose(A__ , A__ ), "The model logits don't match the original one."
__lowerCamelCase = name
print(A__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__lowerCamelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'Pushed {checkpoint_name}' )
def lowerCamelCase__ ( A__ : Path , A__ : str = None , A__ : bool = True ):
__lowerCamelCase = """imagenet-1k-id2label.json"""
__lowerCamelCase = 1000
__lowerCamelCase = (1, num_labels)
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = num_labels
__lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = partial(A__ , num_labels=A__ , idalabel=A__ , labelaid=A__ )
__lowerCamelCase = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
__lowerCamelCase = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , A__ , names_to_config[model_name] , A__ , A__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , A__ , A__ , A__ , A__ )
return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
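# Hedged usage sketch (the script file name below is an assumption, not taken from this file;
# the flags come from the argparse definitions above):
#
#   python convert_levit_to_pytorch.py \
#       --model_name levit-256 \
#       --pytorch_dump_folder_path levit-dump-folder/ \
#       --push_to_hub
#
# Omitting --model_name converts and saves every supported LeViT variant in turn.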
| 355
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 356
|
import os
from math import logaa
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowerCamelCase, __lowerCamelCase = list(map(A__ , line.split(""",""" ) ) )
if x * logaa(A__ ) > largest:
__lowerCamelCase = x * logaa(A__ )
__lowerCamelCase = i + 1
return result
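# Illustrative check (added for clarity, not from the original file): the loop compares values of the
# form base**exponent through exponent * log10(base). For example 2**11 = 2048 and 3**7 = 2187, and
# indeed 11 * log10(2) ≈ 3.31 < 7 * log10(3) ≈ 3.34, so the logarithmic comparison picks 3**7 as the
# larger value without ever computing either power.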
if __name__ == "__main__":
print(solution())
| 29
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'funnel'
UpperCAmelCase__ : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self: str , UpperCamelCase_: Optional[Any]=3_05_22 , UpperCamelCase_: Optional[Any]=[4, 4, 4] , UpperCamelCase_: List[str]=None , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Optional[Any]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Optional[Any]=64 , UpperCamelCase_: Any=30_72 , UpperCamelCase_: Optional[Any]="gelu_new" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Dict=None , UpperCamelCase_: Optional[int]=1E-9 , UpperCamelCase_: Optional[Any]="mean" , UpperCamelCase_: Any="relative_shift" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=True , UpperCamelCase_: Union[str, Any]=True , **UpperCamelCase_: List[str] , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = [1] * len(UpperCamelCase_ ) if block_repeats is None else block_repeats
assert len(UpperCamelCase_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = initializer_range
__lowerCamelCase = initializer_std
__lowerCamelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
__lowerCamelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
__lowerCamelCase = attention_type
__lowerCamelCase = separate_cls
__lowerCamelCase = truncate_seq
__lowerCamelCase = pool_q_only
super().__init__(**UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Tuple ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple ):
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 357
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
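# Usage sketch (an assumption added for clarity, not part of the original snippet): the helper above is
# the `betas_for_alpha_bar` schedule used further down for "squaredcos_cap_v2". It is normally called
# with the number of diffusion timesteps and returns that many betas, e.g.
#   betas = betas_for_alpha_bar(1000)                                  # cosine schedule
#   betas = betas_for_alpha_bar(1000, alpha_transform_type="exp")      # exponential variant
# Each beta_i is 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at the second argument (0.999 by default).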
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
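        # Worked example (illustrative, added for clarity): with num_train_timesteps=1000 and
        # num_inference_steps=4 the three spacings above yield
        #   "linspace": [999, 666, 333, 0]
        #   "leading":  [750, 500, 250, 0]   (plus steps_offset)
        #   "trailing": [999, 749, 499, 249]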
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
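    # Note (added for clarity): `step` below is invoked twice per timestep pair. On the first call
    # (no sample stashed, so `state_in_first_order` is True) it takes an Euler-style step towards the
    # interpolated sigma and stores the current sample; on the second call it completes the
    # DPM-Solver-2-style correction using that stored sample and clears it again. In the original
    # scheduler implementation this state is tracked via `self.sample`.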
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
| 29
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.txt'}
UpperCAmelCase_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
UpperCAmelCase_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
UpperCAmelCase_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = ConvBertTokenizer
def __init__( self: Dict , UpperCamelCase_: Tuple=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[Any]="[UNK]" , UpperCamelCase_: Any="[SEP]" , UpperCamelCase_: List[Any]="[PAD]" , UpperCamelCase_: Dict="[CLS]" , UpperCamelCase_: List[Any]="[MASK]" , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCamelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase_ ) != tokenize_chinese_chars
):
__lowerCamelCase = getattr(UpperCamelCase_ , normalizer_state.pop("""type""" ) )
__lowerCamelCase = do_lower_case
__lowerCamelCase = strip_accents
__lowerCamelCase = tokenize_chinese_chars
__lowerCamelCase = normalizer_class(**UpperCamelCase_ )
__lowerCamelCase = do_lower_case
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: str=None ):
__lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 358
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = IFImgaImgSuperResolutionPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: Optional[int] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCAmelCase_ = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 359
|
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the
# same set.
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = [False] * len(A__ )
__lowerCamelCase = [-1] * len(A__ )
def dfs(A__ : Optional[int] , A__ : Optional[int] ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
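# Quick sanity check (illustrative, not part of the original file): an odd cycle can never be
# two-coloured, so a triangle is not bipartite.
#   print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False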
| 29
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A__ : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCAmelCase_ = '\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class lowerCamelCase__( __lowerCamelCase):
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: ArgumentParser ):
__lowerCamelCase = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=UpperCamelCase_ , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: str , *UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(F'Loading model {model_type}' )
__lowerCamelCase = model_type
__lowerCamelCase = tf_checkpoint
__lowerCamelCase = pytorch_dump_output
__lowerCamelCase = config
__lowerCamelCase = finetuning_task_name
def lowerCAmelCase__ ( self: Any ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
if "ckpt" in self._tf_checkpoint.lower():
__lowerCamelCase = self._tf_checkpoint
__lowerCamelCase = """"""
else:
__lowerCamelCase = self._tf_checkpoint
__lowerCamelCase = """"""
convert_transfo_xl_checkpoint_to_pytorch(
UpperCamelCase_ , self._config , self._pytorch_dump_output , UpperCamelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 360
|
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: dict[str, list[str]] , UpperCamelCase_: str ):
__lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase = {}
__lowerCamelCase = source_vertex
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = {self.source_vertex}
__lowerCamelCase = None
__lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
__lowerCamelCase = vertex
queue.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
__lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
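# Expected behaviour of the example above (worked out by hand, so treat as illustrative): from source
# 'G' the breadth-first tree reaches D via C -> A -> B, so the first call prints 'G->C->A->B->D', the
# second prints just 'G' (the source vertex), and the third raises a ValueError because 'Foo' is not
# reachable from 'G'.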
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 361
|
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
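# Background note (added for clarity): a square lamina with outer side `o` and a centred square hole of
# side `h` (same parity as `o`, 1 <= h <= o - 2) uses o**2 - h**2 tiles. For each outer width the loop
# above counts the hole widths whose tile count stays within `limit`, which is exactly what the
# hole_width_lower_bound and parity adjustment compute.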
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29
| 0
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 362
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = DistilBertTokenizer
UpperCAmelCase__ : Union[str, Any] = DistilBertTokenizerFast
UpperCAmelCase__ : Optional[Any] = True
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 363
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
__lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
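# Hedged usage sketch (the checkpoint name is an assumption, not taken from this file):
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("path/to/image.jpg", candidate_labels=["cat", "remote control"])
# Each result is a dict with "score", "label" and "box" keys, matching the postprocess step above.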
| 29
| 0
|
def lowerCamelCase__ ( A__ : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(A__ , (list, tuple) ) or not all(
isinstance(A__ , A__ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
__lowerCamelCase : int = numbers[0]
for i in range(1 , len(A__ ) ):
# update the maximum and minimum subarray products
__lowerCamelCase : Union[str, Any] = numbers[i]
if number < 0:
__lowerCamelCase, __lowerCamelCase : Dict = min_till_now, max_till_now
__lowerCamelCase : Any = max(A__ , max_till_now * number )
__lowerCamelCase : Any = min(A__ , min_till_now * number )
# update the maximum product found till now
__lowerCamelCase : Tuple = max(A__ , A__ )
return max_prod
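# Illustrative example (worked out by hand): for numbers = [2, 3, -2, 4] the maximum product of a
# contiguous subarray is 6, coming from the slice [2, 3]. Tracking both the running maximum and
# minimum products, and swapping them whenever a negative number is seen, is what keeps this O(n).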
| 364
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 29
| 0
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCAmelCase_ = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
UpperCAmelCase_ = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
UpperCAmelCase_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def lowerCamelCase__ ( A__ : tuple ):
'''simple docstring'''
    return A__[0]
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = get_letter_count(A__ )
__lowerCamelCase = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(A__ )
__lowerCamelCase = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=A__ )
__lowerCamelCase = """""".join(freq_to_letter[freq] )
__lowerCamelCase = list(freq_to_letter_str.items() )
freq_pairs.sort(key=A__ , reverse=A__ )
__lowerCamelCase = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = get_frequency_order(A__ )
__lowerCamelCase = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
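# Usage sketch (illustrative, not part of the original file). The first two
# helpers are referenced by their original names in the bodies above
# (get_letter_count, get_frequency_order); the name english_freq_match_score
# for the last scorer is an assumption. A typical call looks like:
#
#     sample = "Two households, both alike in dignity, in fair Verona"
#     get_frequency_order(sample)        # 26-letter string, most frequent letter first
#     english_freq_match_score(sample)   # integer in [0, 12]; higher = more English-like
#
# The score counts how many of the six most/least frequent letters of the text
# agree with ETAOIN, which is useful for ranking candidate cipher decryptions.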
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
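# Usage sketch (illustrative): the save/load round trip exercised by the tests
# above, using only calls that appear in them; parameter values are examples.
#
#     config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
#     config.save_pretrained(tmp_dir)                      # writes generation_config.json
#     loaded = GenerationConfig.from_pretrained(tmp_dir)   # unspecified fields keep defaults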
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
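# Note on the pattern above (illustrative): with the _LazyModule indirection, a
# statement such as
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderConfig
#
# resolves immediately, while the torch/TF/Flax model classes are only imported
# the first time they are actually accessed, keeping the top-level import cheap.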
| 366
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
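# Usage sketch (illustrative), assuming the function above is exposed as
# cocktail_shaker_sort as the __main__ block below implies:
#
#     >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
#     [1, 2, 2, 4, 5]
#     >>> cocktail_shaker_sort([-4, 0, 3])
#     [-4, 0, 3]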
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example of using Accelerate
# with LocalSGD, a method that synchronizes model
# parameters every K batches. It is different from, but
# complementary to, gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCamelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCamelCase = 8
else:
__lowerCamelCase = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A__ : List[Any] , A__ : Tuple ):
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A__ ) == "1":
__lowerCamelCase = 2
# New Code #
__lowerCamelCase = int(args.gradient_accumulation_steps )
__lowerCamelCase = int(args.local_sgd_steps )
# Initialize accelerator
__lowerCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=A__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config['lr']
__lowerCamelCase = int(config['num_epochs'] )
__lowerCamelCase = int(config['seed'] )
__lowerCamelCase = int(config['batch_size'] )
__lowerCamelCase = evaluate.load('glue' , 'mrpc' )
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
with LocalSGD(
accelerator=A__ , model=A__ , local_sgd_steps=A__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = output.loss
accelerator.backward(A__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase, __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=A__ , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=A__ , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 367
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
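# Usage sketch (illustrative): these helpers are meant to be used as test
# decorators. The names below (slow, require_cuda) are assumptions based on the
# accelerate test suite; the obfuscated definitions above do not preserve them.
#
#     @slow
#     @require_cuda
#     def test_big_model_on_gpu(self):
#         ...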
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
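# Usage sketch (illustrative). Assuming the last helper is exposed as
# run_command (the name is not preserved above), a typical call is:
#
#     out = run_command(["python", "-c", "print('ok')"], return_stdout=True)
#     # -> "ok\n"; a non-zero exit code raises SubprocessCallException instead.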
| 29
| 0
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if all vertices haven't been visited select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
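# Worked example (illustrative), assuming the recursion threads
# (vertex, visited, sort) as the call in the __main__ block below suggests.
# For the module-level graph above, topological_sort('a', [], []) returns
# ['c', 'd', 'e', 'b', 'a']: every vertex appears after its children, so the
# list read right-to-left is a conventional topological order (a, b, e, d, c).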
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
| 368
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
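# Usage sketch (illustrative): this builder backs the "audiofolder" loading
# script in datasets, so a directory laid out as data_dir/<label>/<clip>.wav
# can typically be loaded with:
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="path/to/audio")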
| 29
| 0
|
from math import sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(A__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
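# Worked example (illustrative): for a 6 x 5 x 3 cuboid, the candidate shortest
# surface path unfolds to the hypotenuse sqrt((5 + 3)**2 + 6**2) = sqrt(100) = 10,
# an integer, so that cuboid is counted. The loop above groups cuboids by the
# longest side (max_cuboid_size) and the sum of the two shorter sides, and stops
# at the first longest-side value for which the running count exceeds the limit.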
if __name__ == "__main__":
print(f"""{solution() = }""")
| 369
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
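# Usage sketch (illustrative), assuming the first class above is exposed as
# SegformerConfig as in the transformers library:
#
#     config = SegformerConfig()                 # defaults shown in __init__ above
#     config.num_encoder_blocks                  # 4
#     config.hidden_sizes                        # [32, 64, 160, 256]
#     config = SegformerConfig(num_labels=150)   # e.g. for ADE20K semantic segmentation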
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['BeitFeatureExtractor']
UpperCAmelCase_ = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 370
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda __lowerCamelCase: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
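# Usage sketch (illustrative), assuming the class above is exposed as HillCipher
# as in the main() helper below:
#
#     key = numpy.array([[2, 5], [1, 6]])   # det = 7, coprime with 36, so the key is valid
#     hc = HillCipher(key)
#     ct = hc.encrypt("HELLO")              # input is padded to "HELLOO" (block size 2)
#     hc.decrypt(ct)                        # should recover the padded text "HELLOO"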
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 0
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: Optional[int] , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: int , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
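# Usage sketch (illustrative): the pipeline above corresponds to diffusers'
# VQDiffusionPipeline; the class name and checkpoint below are assumptions.
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]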
| 371
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
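# Worked example (illustrative): for half_adder(1, 1) the XOR (sum) qubit
# measures 0 and the AND (carry) qubit measures 1, so with 1000 shots the
# histogram should be {'10': 1000} - classical bit 1 is the carry, bit 0 the sum.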
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 0
|
UpperCAmelCase_ = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def lowerCamelCase__ ( A__ : float ):
'''simple docstring'''
assert type(A__ ) in (int, float) and decimal == int(A__ )
__lowerCamelCase = int(A__ )
__lowerCamelCase = """"""
__lowerCamelCase = False
if decimal < 0:
__lowerCamelCase = True
decimal *= -1
while decimal > 0:
__lowerCamelCase, __lowerCamelCase = divmod(A__ , 16 )
__lowerCamelCase = values[remainder] + hexadecimal
__lowerCamelCase = """0x""" + hexadecimal
if negative:
__lowerCamelCase = """-""" + hexadecimal
return hexadecimal
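# Usage sketch (illustrative), assuming the converter above is exposed as
# decimal_to_hexadecimal (the name is not preserved above):
#
#     >>> decimal_to_hexadecimal(37)
#     '0x25'
#     >>> decimal_to_hexadecimal(255)
#     '0xff'
#     >>> decimal_to_hexadecimal(-256)
#     '-0x100'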
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
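# Worked examples (illustrative), assuming the function above is exposed as
# partition as the __main__ block below implies. It returns the number of
# integer partitions of m:
#
#     >>> partition(3)
#     3
#     >>> partition(4)
#     5
#     >>> partition(5)
#     7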
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 0
|
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowerCamelCase = range(3 , int(math.sqrt(A__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any]=1 , **A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = factor * value
__lowerCamelCase = value
while not is_prime(A__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **A__ )
return value
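# Usage sketch (illustrative): the helpers above are referenced in their own
# bodies as is_prime and next_prime, so typical calls look like:
#
#     >>> is_prime(13)
#     True
#     >>> is_prime(87)
#     False
#     >>> next_prime(14)
#     17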
| 351
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( A__ : tuple[int, int] , A__ : int ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = position
__lowerCamelCase = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__lowerCamelCase = []
for position in positions:
__lowerCamelCase, __lowerCamelCase = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(A__ )
return permissible_positions
def lowerCamelCase__ ( A__ : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def lowerCamelCase__ ( A__ : list[list[int]] , A__ : tuple[int, int] , A__ : int ):
'''simple docstring'''
if is_complete(A__ ):
return True
for position in get_valid_pos(A__ , len(A__ ) ):
__lowerCamelCase, __lowerCamelCase = position
if board[y][x] == 0:
__lowerCamelCase = curr + 1
if open_knight_tour_helper(A__ , A__ , curr + 1 ):
return True
__lowerCamelCase = 0
return False
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for i in range(A__ )] for j in range(A__ )]
for i in range(A__ ):
for j in range(A__ ):
__lowerCamelCase = 1
if open_knight_tour_helper(A__ , (i, j) , 1 ):
return board
__lowerCamelCase = 0
    __lowerCamelCase = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(A__ )
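# Usage sketch (illustrative), assuming the top-level solver is exposed as
# open_knight_tour (only the helper's name, open_knight_tour_helper, is
# preserved in the code above):
#
#     >>> open_knight_tour(1)
#     [[1]]
#
# Boards too small for any knight move (e.g. n = 2) raise ValueError instead.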
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = XLNetTokenizer
UpperCAmelCase__ : Optional[int] = XLNetTokenizerFast
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : str = True
def lowerCAmelCase__ ( self: Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = """<s>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(UpperCamelCase_ ) , 10_06 )
def lowerCAmelCase__ ( self: Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # 
# noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 353
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if not all vertices have been visited, select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
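# --- illustrative sketch, not part of the original snippet ---
# The recursive helper above appends a vertex only after all of its descendants, so the
# returned list has dependencies first. Kahn's algorithm is a common iterative
# alternative that emits vertices in the opposite orientation (each vertex before the
# vertices it points to). A self-contained sketch on the same toy graph:
from collections import deque

def kahn_topological_sort(edges: dict) -> list:
    indegree = {v: 0 for v in edges}
    for targets in edges.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for t in edges[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return order

# kahn_topological_sort({'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []})
# -> ['a', 'c', 'b', 'd', 'e']  (one valid ordering)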
| 29
| 0
|
from collections.abc import Sequence
def lowerCamelCase__ ( A__ : Sequence[int] | None = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__lowerCamelCase = nums[0]
for i in range(1 , len(A__ ) ):
__lowerCamelCase = nums[i]
__lowerCamelCase = max(A__ , ans + num , A__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ = int(input('Enter number of elements : ').strip())
UpperCAmelCase_ = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
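# --- illustrative usage, not part of the original snippet ---
# The recurrence ans = max(num, ans + num, ans) keeps the best subsequence sum seen so
# far: start fresh at the current number, extend the running best with it, or keep the
# running best unchanged. Two self-checked examples using a local copy of the same idea
# (names here are illustrative):
def max_subsequence_sum_demo(nums: list) -> int:
    best = nums[0]
    for num in nums[1:]:
        best = max(num, best + num, best)
    return best

assert max_subsequence_sum_demo([1, 2, 3, 4, -2]) == 10       # sum of all positive terms
assert max_subsequence_sum_demo([-2, -3, -1, -4, -6]) == -1   # all negative: best single element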
| 354
|
import requests
from bs4 import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
__lowerCamelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
__lowerCamelCase = soup.findAll("""h1""" )
__lowerCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(A__ , A__ )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'distilbert'
UpperCAmelCase__ : List[str] = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self: int , UpperCamelCase_: Any=3_05_22 , UpperCamelCase_: Optional[Any]=5_12 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[Any]=6 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Union[str, Any]=4 * 7_68 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any="gelu" , UpperCamelCase_: int=0.02 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Tuple=0.2 , UpperCamelCase_: Any=0 , **UpperCamelCase_: List[str] , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = sinusoidal_pos_embds
__lowerCamelCase = n_layers
__lowerCamelCase = n_heads
__lowerCamelCase = dim
__lowerCamelCase = hidden_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation
__lowerCamelCase = initializer_range
__lowerCamelCase = qa_dropout
__lowerCamelCase = seq_classif_dropout
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Tuple ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
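# --- illustrative sketch, not part of the original configuration file ---
# The OnnxConfig property above only declares which axes of each input are dynamic
# ("batch" and "sequence"). A hedged sketch of how such a mapping is typically passed to
# torch.onnx.export; the model, dummy inputs, file name and opset below are hypothetical.
from collections import OrderedDict

dynamic_axes = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "sequence"}),
        ("attention_mask", {0: "batch", 1: "sequence"}),
    ]
)

# torch.onnx.export(
#     model,
#     (dummy_input_ids, dummy_attention_mask),
#     "distilbert.onnx",
#     input_names=list(dynamic_axes),
#     dynamic_axes=dict(dynamic_axes),   # lets batch size and sequence length vary at run time
#     opset_version=11,
# )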
| 355
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
import operator
def lowerCamelCase__ ( A__ : list , A__ : bool = False , A__ : list | None = None ):
'''simple docstring'''
__lowerCamelCase = operator.lt if reverse else operator.gt
__lowerCamelCase = solution or []
if not arr:
return solution
__lowerCamelCase = [arr.pop(0 )]
for i, item in enumerate(A__ ):
if _operator(A__ , sublist[-1] ):
sublist.append(A__ )
arr.pop(A__ )
# merging sublist into solution list
if not solution:
solution.extend(A__ )
else:
while sublist:
__lowerCamelCase = sublist.pop(0 )
for i, xx in enumerate(A__ ):
if not _operator(A__ , A__ ):
solution.insert(A__ , A__ )
break
else:
solution.append(A__ )
strand_sort(A__ , A__ , A__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 356
|
import os
from math import logaa
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowerCamelCase, __lowerCamelCase = list(map(A__ , line.split(""",""" ) ) )
if x * logaa(A__ ) > largest:
__lowerCamelCase = x * logaa(A__ )
__lowerCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
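# --- illustrative sketch, not part of the original solution file ---
# Comparing base**exp directly would build enormous integers; since log10 is monotonic,
# comparing exp * log10(base) ranks the same values cheaply, which is what the loop
# above is doing line by line. A self-contained check on made-up (base, exponent) pairs:
from math import log10

pairs = [(2, 100), (10, 20), (3, 50)]
best_index = max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0]))
assert pairs[best_index] == (2, 100)   # 2**100 ~ 1.27e30 beats 10**20 and 3**50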
| 29
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase__ ( A__ : List[Any]=2 , A__ : Tuple=3 , A__ : Union[str, Any]=16 , A__ : int = 10 , A__ : int = 2 ):
'''simple docstring'''
def get_dataset(A__ : List[str] ):
__lowerCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__lowerCamelCase = get_dataset(A__ )
__lowerCamelCase = get_dataset(A__ )
__lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
__lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase__ ( A__ : str , A__ : Tuple , A__ : List[str] , A__ : str , A__ : List[str] , A__ : List[Any]=None ):
'''simple docstring'''
__lowerCamelCase = []
for epoch in range(A__ ):
# Train quickly
model.train()
for batch in dataloader:
__lowerCamelCase, __lowerCamelCase = batch
__lowerCamelCase = model(A__ )
__lowerCamelCase = torch.nn.functional.mse_loss(A__ , A__ )
accelerator.backward(A__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase__( nn.Module):
def __init__( self: Dict ):
super().__init__()
__lowerCamelCase = nn.Parameter(torch.randn(1 ) )
__lowerCamelCase = nn.Parameter(torch.randn(1 ) )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[Any] ):
return x * self.a + self.b
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowerCAmelCase__ ( self: Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
# Train baseline
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
__lowerCamelCase = os.path.join(UpperCamelCase_ , """initial""" )
accelerator.save_state(UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
__lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
__lowerCamelCase = os.path.join(UpperCamelCase_ , """checkpoint""" )
accelerator.save_state(UpperCamelCase_ )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCamelCase_ )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
__lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ )
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = torch.tensor([1, 2, 3] )
__lowerCamelCase = torch.tensor([2, 3, 4] )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(net.parameters() )
__lowerCamelCase = Accelerator()
with self.assertRaises(UpperCamelCase_ ) as ve:
accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
__lowerCamelCase = scheduler.state_dict()
train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(UpperCamelCase_ , scheduler.state_dict() )
def lowerCAmelCase__ ( self: List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase_ = '/tmp/accelerate/state_checkpointing'
UpperCAmelCase_ = DummyModel()
UpperCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCAmelCase_ , UpperCAmelCase_ = dummy_dataloaders()
UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
UpperCAmelCase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
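# --- illustrative sketch, not part of the original test file ---
# The tests above revolve around Accelerator.save_state / load_state together with
# ProjectConfiguration(automatic_checkpoint_naming=True), which writes numbered
# "checkpoints/checkpoint_N" folders and prunes the oldest ones once total_limit is
# reached. A hedged usage sketch; the paths, limits and prepared objects are made up.
#
# from accelerate import Accelerator
# from accelerate.utils import ProjectConfiguration
#
# config = ProjectConfiguration(project_dir="runs/demo", automatic_checkpoint_naming=True, total_limit=2)
# accelerator = Accelerator(project_config=config)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#
# accelerator.save_state()     # -> runs/demo/checkpoints/checkpoint_0
# accelerator.save_state()     # -> checkpoint_1
# accelerator.save_state()     # -> checkpoint_2; checkpoint_0 is removed (total_limit=2)
# accelerator.load_state("runs/demo/checkpoints/checkpoint_2")   # restores model, optimizer and RNG state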
| 357
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
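# --- illustrative sketch, not part of the original scheduler file ---
# betas_for_alpha_bar above discretises a continuous alpha_bar(t) curve into per-step
# betas via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta. A
# small torch-free restatement for the cosine ("squaredcos_cap_v2") case:
import math

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

# cosine_betas(1000)[0] is tiny (roughly 4e-5) and the values grow towards max_beta, so
# early diffusion steps add very little noise and the final steps add the most.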
| 29
| 0
|
from functools import lru_cache
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = 2
__lowerCamelCase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(A__ )
if n > 1:
factors.add(A__ )
return factors
@lru_cache
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return len(unique_prime_factors(A__ ) )
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
return len(set(A__ ) ) in (0, 1)
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = 2
while True:
# Increment each value of a generated range
__lowerCamelCase = [base + i for i in range(A__ )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowerCamelCase = [upf_len(A__ ) for x in group]
checker.append(A__ )
# If all numbers in the list are equal, return the group variable.
if equality(A__ ):
return group
# Increment our base variable by 1
base += 1
def lowerCamelCase__ ( A__ : int = 4 ):
'''simple docstring'''
__lowerCamelCase = run(A__ )
return results[0] if len(A__ ) else None
if __name__ == "__main__":
print(solution())
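# --- illustrative usage, not part of the original solution file ---
# The search above looks for the first run of consecutive integers that all have the
# same count of distinct prime factors. A self-contained check of the classic n=3 case
# (644, 645 and 646 each have exactly three distinct prime factors):
def distinct_prime_factors_demo(n: int) -> set:
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

assert [sorted(distinct_prime_factors_demo(x)) for x in (644, 645, 646)] == [
    [2, 7, 23],      # 644 = 2^2 * 7 * 23
    [3, 5, 43],      # 645 = 3 * 5 * 43
    [2, 17, 19],     # 646 = 2 * 17 * 19
]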
| 358
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = IFImgaImgSuperResolutionPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: Optional[int] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 0
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = 9
__lowerCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCamelCase = kruskal(A__ , A__ )
__lowerCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(A__ ) == sorted(A__ )
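# --- illustrative sketch, not part of the original test file ---
# The test only exercises an imported kruskal(num_nodes, edges) helper. A hedged guess
# at what such a helper does: sort edges by weight and keep an edge whenever its two
# endpoints are still in different union-find components (the real module may differ).
def kruskal_sketch(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst

# On the edge list above this sketch recovers exactly the expected spanning tree, though
# ties between equal-weight edges could, in general, be broken differently.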
| 359
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = [False] * len(A__ )
__lowerCamelCase = [-1] * len(A__ )
def dfs(A__ : Optional[int] , A__ : Optional[int] ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
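# --- illustrative usage, not part of the original snippet ---
# The DFS above 2-colours the graph and fails when an edge joins two vertices of the
# same colour, so any graph containing an odd cycle is rejected. A self-contained BFS
# variant plus a triangle counterexample:
from collections import deque

def is_bipartite_bfs(graph: dict) -> bool:
    color = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            u = queue.popleft()
            for v in graph[u]:
                if v not in color:
                    color[v] = 1 - color[u]
                    queue.append(v)
                elif color[v] == color[u]:
                    return False
    return True

assert is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}) is True
assert is_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False   # triangle: odd cycle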
| 29
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : list[int] , A__ : int ):
'''simple docstring'''
__lowerCamelCase = list(range(len(A__ ) ) )
__lowerCamelCase = [v / w for v, w in zip(A__ , A__ )]
index.sort(key=lambda A__ : ratio[i] , reverse=A__ )
__lowerCamelCase = 0
__lowerCamelCase = [0] * len(A__ )
for i in index:
if weight[i] <= capacity:
__lowerCamelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
__lowerCamelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
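# --- illustrative usage, not part of the original snippet ---
# Greedy by value/weight ratio: whole items are taken while they fit, then a fraction of
# the next best item fills the remaining capacity. A worked example with hypothetical
# numbers, checked against a local copy of the same greedy rule:
def fractional_knapsack_demo(value: list, weight: list, capacity: float):
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    total, fractions = 0.0, [0.0] * len(value)
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            total += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            total += value[i] * fractions[i]
            break
    return total, fractions

# Values (60, 100, 120), weights (10, 20, 30), capacity 50: ratios are 6, 5 and 4, so
# items 0 and 1 are taken whole and two thirds of item 2 fills the rest.
total, fractions = fractional_knapsack_demo([60, 100, 120], [10, 20, 30], 50)
assert round(total, 6) == 240.0 and fractions[:2] == [1.0, 1.0] and round(fractions[2], 4) == 0.6667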
| 360
|
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: dict[str, list[str]] , UpperCamelCase_: str ):
__lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase = {}
__lowerCamelCase = source_vertex
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = {self.source_vertex}
__lowerCamelCase = None
__lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
__lowerCamelCase = vertex
queue.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
__lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
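# --- illustrative sketch, not part of the original snippet ---
# breath_first_search above pops from the front of a plain list, which costs O(n) per
# pop; collections.deque gives O(1) pops, and the parent map can be unwound into an
# explicit path. A compact self-contained variant checked on the same example graph:
from collections import deque

def bfs_shortest_path(graph: dict, source: str, target: str) -> list:
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        if vertex == target:
            path = []
            while vertex is not None:
                path.append(vertex)
                vertex = parent[vertex]
            return path[::-1]
        for neighbour in graph[vertex]:
            if neighbour not in parent:
                parent[neighbour] = vertex
                queue.append(neighbour)
    raise ValueError(f"No path from vertex: {source} to vertex: {target}")

demo_graph = {
    "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
    "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"],
}
assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]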
| 29
| 0
|
import math
def lowerCamelCase__ ( A__ : list , A__ : int ):
'''simple docstring'''
__lowerCamelCase = len(A__ )
__lowerCamelCase = int(math.floor(math.sqrt(A__ ) ) )
__lowerCamelCase = 0
while arr[min(A__ , A__ ) - 1] < x:
__lowerCamelCase = step
step += int(math.floor(math.sqrt(A__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__lowerCamelCase = prev + 1
if prev == min(A__ , A__ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
UpperCAmelCase_ = int(input('Enter the number to be searched:\n'))
UpperCAmelCase_ = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"""Number {x} is at index {res}""")
| 361
|
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29
| 0
|
UpperCAmelCase_ = {
'km/h': 1.0,
'm/s': 3.6,
'mph': 1.60_9344,
'knot': 1.852,
}
UpperCAmelCase_ = {
'km/h': 1.0,
'm/s': 0.2_7777_7778,
'mph': 0.6_2137_1192,
'knot': 0.5_3995_6803,
}
def lowerCamelCase__ ( A__ : float , A__ : str , A__ : str ):
'''simple docstring'''
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__lowerCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(A__ )}'
)
raise ValueError(A__ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
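# --- illustrative usage, not part of the original snippet ---
# Every conversion goes through km/h: multiply by the factor that turns the source unit
# into km/h, then by the inverse factor of the target unit. A few checks computed
# directly from the same tables (local copies, names are illustrative):
km_per_h = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
inverse = {"km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803}

def convert_speed_demo(speed: float, unit_from: str, unit_to: str) -> float:
    return round(speed * km_per_h[unit_from] * inverse[unit_to], 3)

assert convert_speed_demo(100, "km/h", "m/s") == 27.778
assert convert_speed_demo(100, "km/h", "mph") == 62.137
assert convert_speed_demo(1, "m/s", "km/h") == 3.6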
| 362
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase_ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = 'retribert'
def __init__( self: str , UpperCamelCase_: List[str]=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=8 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[Any]=30_72 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Tuple=5_12 , UpperCamelCase_: Dict=2 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: List[str]=1_28 , UpperCamelCase_: Any=0 , **UpperCamelCase_: Optional[int] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = share_encoders
__lowerCamelCase = projection_dim
| 363
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
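        # Accepts either a single image (path, URL or PIL.Image) together with candidate labels,
        # or a batch given as a list of dicts; `text_queries` is kept as a legacy alias for
        # `candidate_labels`.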
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
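        # Yield one model input per candidate label: the tokenized label, the processed image
        # features, the original image size (needed later to rescale boxes) and an `is_last` flag
        # so the chunk pipeline knows when the inputs for one image end.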
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
        __lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.int64 )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
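        # Convert the per-label detections back to absolute boxes, keep only scores above the
        # threshold, sort by score and optionally truncate to the `top_k` best matches.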
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
        __lowerCamelCase = sorted(results , key=lambda x : x["score"] , reverse=True )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
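# A minimal usage sketch (an illustrative assumption -- it relies on the public `pipeline`
# factory and an OWL-ViT checkpoint such as "google/owlvit-base-patch32" being available):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cats.png", candidate_labels=["cat", "remote control"])
#
# Each returned dict carries "score", "label" and "box" keys, as produced by postprocess above.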
| 29
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger('transformers.models.encodec')
UpperCAmelCase_ = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
UpperCAmelCase_ = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
UpperCAmelCase_ = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
UpperCAmelCase_ = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
UpperCAmelCase_ = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
UpperCAmelCase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
UpperCAmelCase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
UpperCAmelCase_ = []
UpperCAmelCase_ = []
def lowerCamelCase__ ( A__ : List[str] , A__ : Tuple , A__ : List[str] , A__ : List[str] , A__ : Tuple ):
'''simple docstring'''
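    # Walk the dotted `key` down to the target submodule/parameter, verify the checkpoint value
    # has the expected shape, then copy it into the attribute selected by `weight_type`.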
for attribute in key.split(""".""" ):
__lowerCamelCase : Optional[Any] = getattr(A__ , A__ )
if weight_type is not None:
__lowerCamelCase : int = getattr(A__ , A__ ).shape
else:
__lowerCamelCase : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCamelCase : int = value
elif weight_type == "weight_g":
__lowerCamelCase : Union[str, Any] = value
elif weight_type == "weight_v":
__lowerCamelCase : List[Any] = value
elif weight_type == "bias":
__lowerCamelCase : List[Any] = value
elif weight_type == "running_mean":
__lowerCamelCase : Union[str, Any] = value
elif weight_type == "running_var":
__lowerCamelCase : int = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase : Tuple = value
elif weight_type == "weight_ih_l0":
__lowerCamelCase : List[Any] = value
elif weight_type == "weight_hh_l0":
__lowerCamelCase : Any = value
elif weight_type == "bias_ih_l0":
__lowerCamelCase : Optional[int] = value
elif weight_type == "bias_hh_l0":
__lowerCamelCase : List[Any] = value
elif weight_type == "weight_ih_l1":
__lowerCamelCase : str = value
elif weight_type == "weight_hh_l1":
__lowerCamelCase : Optional[Any] = value
elif weight_type == "bias_ih_l1":
__lowerCamelCase : Tuple = value
elif weight_type == "bias_hh_l1":
__lowerCamelCase : List[str] = value
else:
__lowerCamelCase : List[Any] = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def lowerCamelCase__ ( A__ : Any , A__ : List[str] ):
'''simple docstring'''
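    # Three ignore patterns are supported: a trailing ".*" matches by prefix, a ".*." in the
    # middle matches prefix and suffix, and anything else is a plain substring match.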
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase, __lowerCamelCase : str = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase__ ( A__ : List[Any] , A__ : List[Any] , A__ : Optional[Any] ):
'''simple docstring'''
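    # Select the key mapping for the model variant, then for every original weight find the
    # mapped HF key and its weight type (conv weight_g/weight_v, LSTM gates, norm statistics,
    # plain weight/bias) and hand it to set_recursively; unmatched weights are reported at the end.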
__lowerCamelCase : Tuple = []
if model_name == "encodec_24khz" or "encodec_32khz":
__lowerCamelCase : int = MAPPING_24K
elif model_name == "encodec_48khz":
__lowerCamelCase : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(A__ , A__ ):
logger.info(f'{name} was ignored' )
continue
__lowerCamelCase : Optional[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
__lowerCamelCase, __lowerCamelCase : Union[str, Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
__lowerCamelCase : Any = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
__lowerCamelCase : Any = True
if "*" in mapped_key:
__lowerCamelCase : str = name.split(A__ )[0].split(""".""" )[-2]
__lowerCamelCase : Dict = mapped_key.replace("""*""" , A__ )
if "weight_g" in name:
__lowerCamelCase : List[Any] = """weight_g"""
elif "weight_v" in name:
__lowerCamelCase : Union[str, Any] = """weight_v"""
elif "weight_ih_l0" in name:
__lowerCamelCase : Dict = """weight_ih_l0"""
elif "weight_hh_l0" in name:
__lowerCamelCase : Optional[Any] = """weight_hh_l0"""
elif "bias_ih_l0" in name:
__lowerCamelCase : Optional[int] = """bias_ih_l0"""
elif "bias_hh_l0" in name:
__lowerCamelCase : Union[str, Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
__lowerCamelCase : Optional[Any] = """weight_ih_l1"""
elif "weight_hh_l1" in name:
__lowerCamelCase : int = """weight_hh_l1"""
elif "bias_ih_l1" in name:
__lowerCamelCase : Tuple = """bias_ih_l1"""
elif "bias_hh_l1" in name:
__lowerCamelCase : Optional[int] = """bias_hh_l1"""
elif "bias" in name:
__lowerCamelCase : Any = """bias"""
elif "weight" in name:
__lowerCamelCase : Union[str, Any] = """weight"""
elif "running_mean" in name:
__lowerCamelCase : Optional[Any] = """running_mean"""
elif "running_var" in name:
__lowerCamelCase : Tuple = """running_var"""
elif "num_batches_tracked" in name:
__lowerCamelCase : Optional[Any] = """num_batches_tracked"""
else:
__lowerCamelCase : List[str] = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Any , A__ : Dict , A__ : List[str]=None , A__ : Dict=None , ):
'''simple docstring'''
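    # Load or build an EncodecConfig, override the architecture fields for the 32 kHz and 48 kHz
    # variants, save a matching feature extractor, remap the original checkpoint weights into the
    # HF model and optionally push both to the Hub.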
if config_path is not None:
__lowerCamelCase : Tuple = EncodecConfig.from_pretrained(A__ )
else:
__lowerCamelCase : Optional[Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__lowerCamelCase : Optional[int] = [8, 5, 4, 4]
__lowerCamelCase : Any = [2.2]
__lowerCamelCase : List[Any] = 64
__lowerCamelCase : Union[str, Any] = 32000
__lowerCamelCase : Union[str, Any] = 2048
__lowerCamelCase : Any = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Dict = False
elif model_name == "encodec_48khz":
__lowerCamelCase : int = [8, 5, 4, 2]
__lowerCamelCase : Dict = [3.0, 6.0, 12.0, 24.0]
__lowerCamelCase : Optional[int] = 48000
__lowerCamelCase : Optional[int] = 2
__lowerCamelCase : str = False
__lowerCamelCase : Union[str, Any] = """time_group_norm"""
__lowerCamelCase : Any = True
__lowerCamelCase : int = 1.0
__lowerCamelCase : Union[str, Any] = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
__lowerCamelCase : Any = EncodecModel(A__ )
__lowerCamelCase : Tuple = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(A__ )
__lowerCamelCase : int = torch.load(A__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__lowerCamelCase : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(A__ , A__ , A__ )
model.save_pretrained(A__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(A__ )
model.push_to_hub(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 364
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 0
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 366
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
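    # Cocktail shaker sort is a bidirectional bubble sort: each outer iteration does a backward
    # pass that sinks small values towards the front, then a forward pass that floats large
    # values towards the back, and stops early once a full iteration performs no swaps.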
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no
    # data is seen until the process finishes, so if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29
| 0
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ):
'''simple docstring'''
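    # Build an undirected adjacency list from the weighted edge list, run Prim's algorithm and
    # check that every edge of the known minimum spanning tree appears in the result in either
    # direction.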
__lowerCamelCase, __lowerCamelCase = 9, 14 # noqa: F841
__lowerCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCamelCase = defaultdict(A__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__lowerCamelCase = mst(A__ )
__lowerCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__lowerCamelCase = tuple(answer[:2] )
__lowerCamelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
| 368
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
| 0
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCAmelCase_ = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
UpperCAmelCase_ = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
UpperCAmelCase_ = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCamelCase__ ( A__ : Tuple , A__ : Dict ):
'''simple docstring'''
return float((preds == labels).mean() )
def lowerCamelCase__ ( A__ : Dict , A__ : Tuple , A__ : Union[str, Any]="binary" ):
'''simple docstring'''
__lowerCamelCase = simple_accuracy(A__ , A__ )
__lowerCamelCase = float(fa_score(y_true=A__ , y_pred=A__ , average=A__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase__ ( A__ : str , A__ : int ):
'''simple docstring'''
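    # Group answer predictions by question, then report the mean per-question macro F1 (f1_m),
    # the exact-match rate where every answer of a question is correct, and the plain
    # answer-level F1 (f1_a).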
__lowerCamelCase = {}
for id_pred, label in zip(A__ , A__ ):
__lowerCamelCase = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
__lowerCamelCase = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__lowerCamelCase = [(pred, label)]
__lowerCamelCase, __lowerCamelCase = [], []
for question, preds_labels in question_map.items():
__lowerCamelCase, __lowerCamelCase = zip(*A__ )
__lowerCamelCase = fa_score(y_true=A__ , y_pred=A__ , average="""macro""" )
fas.append(A__ )
__lowerCamelCase = int(sum(pred == label for pred, label in preds_labels ) == len(A__ ) )
ems.append(A__ )
__lowerCamelCase = float(sum(A__ ) / len(A__ ) )
__lowerCamelCase = sum(A__ ) / len(A__ )
__lowerCamelCase = float(fa_score(y_true=A__ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__( datasets.Metric):
def lowerCAmelCase__ ( self: Any ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def lowerCAmelCase__ ( self: List[str] ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ , fa_avg="""macro""" )
elif self.config_name == "record":
__lowerCamelCase = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
__lowerCamelCase = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(UpperCamelCase_ , UpperCamelCase_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 369
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
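        # absolute tolerance used when validating outputs of the exported ONNX model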
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = 256
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = ['melgan']
def __init__( self: Optional[Any] , UpperCamelCase_: SpectrogramNotesEncoder , UpperCamelCase_: SpectrogramContEncoder , UpperCamelCase_: TaFilmDecoder , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: OnnxRuntimeModel if is_onnx_available() else Any , ):
super().__init__()
# From MELGAN
__lowerCamelCase = math.log(1E-5 ) # Matches MelGAN training.
__lowerCamelCase = 4.0 # Largest value for most examples
__lowerCamelCase = 1_28
self.register_modules(
notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: int=(-1.0, 1.0) , UpperCamelCase_: Union[str, Any]=False ):
__lowerCamelCase, __lowerCamelCase = output_range
if clip:
__lowerCamelCase = torch.clip(UpperCamelCase_ , self.min_value , self.max_value )
# Scale to [0, 1].
__lowerCamelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=(-1.0, 1.0) , UpperCamelCase_: Dict=False ):
__lowerCamelCase, __lowerCamelCase = input_range
__lowerCamelCase = torch.clip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if clip else outputs
# Scale to [0, 1].
__lowerCamelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Dict ):
__lowerCamelCase = input_tokens > 0
__lowerCamelCase, __lowerCamelCase = self.notes_encoder(
encoder_input_tokens=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = self.continuous_encoder(
encoder_inputs=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = noise_time
if not torch.is_tensor(UpperCamelCase_ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.decoder(
encodings_and_masks=UpperCamelCase_ , decoder_input_tokens=UpperCamelCase_ , decoder_noise_time=UpperCamelCase_ )
return logits
@torch.no_grad()
def __call__( self: List[Any] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: Optional[torch.Generator] = None , UpperCamelCase_: int = 1_00 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "numpy" , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
__lowerCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__lowerCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
__lowerCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device )
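        # generate the spectrogram chunk by chunk, feeding each predicted chunk back in as context for the next one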
for i, encoder_input_tokens in enumerate(UpperCamelCase_ ):
if i == 0:
__lowerCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__lowerCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__lowerCamelCase = ones
__lowerCamelCase = self.scale_features(
UpperCamelCase_ , output_range=[-1.0, 1.0] , clip=UpperCamelCase_ )
__lowerCamelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase_ , continuous_mask=UpperCamelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__lowerCamelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCamelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__lowerCamelCase = self.decode(
encodings_and_masks=UpperCamelCase_ , input_tokens=UpperCamelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
__lowerCamelCase = self.scale_to_features(UpperCamelCase_ , input_range=[-1.0, 1.0] )
__lowerCamelCase = mel[:1]
__lowerCamelCase = mel.cpu().float().numpy()
__lowerCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ )
logger.info("""Generated segment""" , UpperCamelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
__lowerCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__lowerCamelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCamelCase_ )
| 370
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda x: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
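        # the key is invertible modulo 36 only if its determinant is coprime with 36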
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
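        # pad with the last character until the length is a multiple of the key order (break_key)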
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
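        # brute-force search for the modular multiplicative inverse of the determinant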
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ = {
'gpt-neox-20b': 2_048,
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self: List[str] , UpperCamelCase_: int=None , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Dict=None , UpperCamelCase_: str="<|endoftext|>" , UpperCamelCase_: Any="<|endoftext|>" , UpperCamelCase_: Optional[Any]="<|endoftext|>" , UpperCamelCase_: int=False , **UpperCamelCase_: Optional[Any] , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space:
__lowerCamelCase = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) )
__lowerCamelCase = add_prefix_space
__lowerCamelCase = pre_tok_class(**UpperCamelCase_ )
__lowerCamelCase = add_prefix_space
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: "Conversation" ):
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
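        # keep only the most recent tokens when the conversation exceeds the model's maximum context length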
if len(UpperCamelCase_ ) > self.model_max_length:
__lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 371
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|