code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowercase ( unittest.TestCase ):
    """Tests for `transformers.utils.backbone_utils` helpers and `BackboneMixin`.

    NOTE(review): the obfuscated dump bound every result to a throwaway local
    (`lowerCamelCase__`) and asserted on undefined names; the working variable
    names below are reconstructed from the assertion use sites.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    """Return True if *graph* (adjacency dict/list of int vertices) is bipartite.

    Colors vertices with a 2-coloring DFS; the graph is bipartite iff no edge
    connects two vertices of the same color. Disconnected components are each
    colored independently.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c and recurse into uncolored neighbours with the
        # opposite color. (The obfuscated source recursed on the graph itself
        # instead of the neighbour `u` — fixed.)
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Cover disconnected components.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    # An edge inside one color class means the graph is not bipartite.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 are free path whereas 1's are obstacles
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Positions are (y, x) tuples throughout this module.
TPosition = tuple[int, int]
class Node:
    """A search node at (pos_y, pos_x) with g/h/f costs for A*.

    The obfuscated dump gave every `__init__` parameter the same name (a
    SyntaxError) and never assigned the instance attributes; names restored
    from the attribute use sites.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic distance to the goal: manhattan if HEURISTIC == 1, else euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by total cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """Plain A* over the module-level `grid`, moving in the 4 `delta` directions."""

    def __init__(self, start: TPosition, goal: TPosition):
        # Node takes (x, y, ...) while positions are (y, x) tuples.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path to the target, or [start] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbour nodes of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links from *node* back to the start and return the path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two A* searches (start→goal and goal→start) that meet in the middle."""

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate expansions of both frontiers until they meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each direction aims at the other frontier's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting node to avoid duplication
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the obfuscated dump dropped this search() call entirely.
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
def __init__( self: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Optional[int] = dataset
lowerCamelCase__ : Optional[int] = process
lowerCamelCase__ : List[str] = params
def __len__( self: List[str] ):
return len(self.dataset )
def __getitem__( self: Any , UpperCamelCase__: int ):
lowerCamelCase__ : Dict = self.dataset[i]
lowerCamelCase__ : Union[str, Any] = self.process(UpperCamelCase__ , **self.params )
return processed
class _lowercase ( _lowercase ):
def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
lowerCamelCase__ : int = loader
lowerCamelCase__ : str = infer
lowerCamelCase__ : Optional[int] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : int = loader_batch_size
# Internal bookkeeping
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Optional[Any] = None
def __len__( self: Dict ):
return len(self.loader )
def __iter__( self: Optional[int] ):
lowerCamelCase__ : List[Any] = iter(self.loader )
return self
def lowerCamelCase_ ( self: Any ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCamelCase__ : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
lowerCamelCase__ : str = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowerCamelCase__ : List[str] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCamelCase__ : str = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def lowerCamelCase_ ( self: List[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCamelCase__ : Optional[Any] = next(self.iterator )
lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
lowerCamelCase__ : Optional[Any] = processed
else:
lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
lowerCamelCase__ : Any = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : Any = len(UpperCamelCase__ )
else:
lowerCamelCase__ : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCamelCase__ : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
lowerCamelCase__ : List[Any] = processed
lowerCamelCase__ : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens the sub-iterators produced by `infer` into one stream."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # `loader_batch_size` is accepted for signature compatibility but the
        # chunk iterator itself never unrolls batches.
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        # No sub-iterator started yet.
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # subiterator None means we haven't started unrolling the first item yet.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk items back into lists ending at `is_last`."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("""is_last""")
                accumulator.append(item)
        return accumulator
class _lowercase ( _lowercase ):
def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str ):
lowerCamelCase__ : Union[str, Any] = dataset
lowerCamelCase__ : str = key
def __len__( self: Optional[Any] ):
return len(self.dataset )
def __getitem__( self: List[str] , UpperCamelCase__: Any ):
return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str , UpperCamelCase__: str ):
lowerCamelCase__ : str = dataset
lowerCamelCase__ : Dict = keya
lowerCamelCase__ : List[str] = keya
def __len__( self: str ):
return len(self.dataset )
def __getitem__( self: List[str] , UpperCamelCase__: Union[str, Any] ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 631 | 0 |
'''simple docstring'''
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    """Return the midpoint of two (x, y) points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw a Sierpinski triangle of the given recursion *depth* with `my_pen`.

    Relies on the module-level turtle `my_pen` created in the __main__ block.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    # vertices of the outermost triangle, (x, y)
    vertices = [(-175, -125), (0, 175), (175, -125)]
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 700 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): the obfuscated dump assigned the bare string "3" to a throwaway
# name while importing `os` unused; restoring the environment write (silencing
# TensorFlow C++ logs) is the presumed original intent — confirm against the
# upstream print_env script.
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''

print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)

try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """Helper that builds small ViT configs/inputs and runs shape checks for the
    unittest class below.

    NOTE(review): reconstructed from the obfuscated dump — parameter names are
    inferred from the attribute use sites and the visible default values.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model/pipeline tests for ViT.

    NOTE(review): the obfuscated dump named every class attribute `a` (each
    shadowing the previous) and collapsed the mixin bases; attribute names are
    restored to the ones the mixins read.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test fixture image used by the integration tests."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests against pretrained ViT checkpoints."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("""facebook/dino-vits8""").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3_601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        # NOTE(review): the obfuscated dump had the nonexistent dtype
        # `torch.floataa`; `torch.float16` is the evident intent of an fp16 test.
        model = ViTModel.from_pretrained("""facebook/dino-vits8""", torch_dtype=torch.float16, device_map="""auto""")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Public import structure consumed by `_LazyModule` below.  The previous
# revision bound this dict (and later the modeling symbol list) to a throwaway
# `_A` name, so `_import_structure` was undefined at the bottom of the file and
# the lazy-module registration never took effect.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only symbols instead of clobbering the dict.
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module logger handle (bound to the obfuscated name `_A`; nothing in the
# visible file reads it back).
_A : Dict =logging.get_logger(__name__)
# Make every model test in this file deterministic across runs.
enable_full_determinism()
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Model tests for the plain 3-channel, 32x32 UNet2D configuration.

    NOTE(review): `_lowercase` in the base list is unbound at this point and
    the two `a` attributes clobber each other — artifacts of an automated
    renaming pass (the bases were presumably the imported ModelTesterMixin /
    UNetTesterMixin, the attributes `model_class` / `main_input_name`).
    """
    a = UNetaDModel
    a = """sample"""

    @property
    def lowerCamelCase_ ( self: str ):
        # Dummy (noise, timestep) batch for the common model tests.
        # NOTE(review): `UpperCamelCase__` is unbound — presumably `torch_device`.
        lowerCamelCase__ : Union[str, Any] = 4
        lowerCamelCase__ : Union[str, Any] = 3
        lowerCamelCase__ : int = (32, 32)
        lowerCamelCase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = torch.tensor([10] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Expected input shape (channels, height, width).
        return (3, 32, 32)

    @property
    def lowerCamelCase_ ( self: int ):
        # Expected output shape (channels, height, width).
        return (3, 32, 32)

    def lowerCamelCase_ ( self: Optional[int] ):
        # Init kwargs plus dummy inputs consumed by the shared tester mixins.
        lowerCamelCase__ : Dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        lowerCamelCase__ : List[Any] = self.dummy_input
        return init_dict, inputs_dict
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Model tests for the LDM-style 4-channel UNet2D configuration, plus
    checkpoint-loading and numerical-regression tests against the
    `fusing/unet-ldm-dummy-update` hub checkpoint.

    NOTE(review): `_lowercase` in the base list and the duplicated `a`
    attributes are renaming-pass artifacts (presumably mixin bases and
    `model_class` / `main_input_name`); `UpperCamelCase__` is read unbound in
    several methods (presumably `torch_device` / `output_loading_info=True`).
    """
    a = UNetaDModel
    a = """sample"""

    @property
    def lowerCamelCase_ ( self: Optional[int] ):
        # Dummy 4-channel (noise, timestep) batch.
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : str = 4
        lowerCamelCase__ : Any = (32, 32)
        lowerCamelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor([10] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowerCamelCase_ ( self: List[str] ):
        # Expected input shape (channels, height, width).
        return (4, 32, 32)

    @property
    def lowerCamelCase_ ( self: Dict ):
        # Expected output shape (channels, height, width).
        return (4, 32, 32)

    def lowerCamelCase_ ( self: List[Any] ):
        # Init kwargs plus dummy inputs consumed by the shared tester mixins.
        lowerCamelCase__ : Tuple = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        lowerCamelCase__ : str = self.dummy_input
        return init_dict, inputs_dict

    def lowerCamelCase_ ( self: List[str] ):
        # Checkpoint loads with no missing keys and produces an output.
        lowerCamelCase__ : int = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def lowerCamelCase_ ( self: List[str] ):
        # Same smoke test, but on GPU.
        lowerCamelCase__ : int = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def lowerCamelCase_ ( self: List[str] ):
        # Accelerate-based (low_cpu_mem_usage=True) loading must match normal
        # loading numerically.
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        lowerCamelCase__ : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
        model_accelerate.to(UpperCamelCase__ )
        model_accelerate.eval()
        lowerCamelCase__ : List[str] = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCamelCase__ : Tuple = noise.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )["""sample"""]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        lowerCamelCase__ : List[str] = UNetaDModel.from_pretrained(
            """fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
        model_normal_load.to(UpperCamelCase__ )
        model_normal_load.eval()
        lowerCamelCase__ : Union[str, Any] = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )["""sample"""]
        assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )

    def lowerCamelCase_ ( self: Tuple ):
        # Numerical regression on the checkpoint's output against hard-coded
        # reference values (seeded noise, fixed timestep).
        lowerCamelCase__ : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
        model.eval()
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCamelCase__ : Dict = noise.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        lowerCamelCase__ : int = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        lowerCamelCase__ : int = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 ) )
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Model tests for the score-based (NCSN++/Fourier-embedding) UNet2D
    configuration, plus slow numerical-regression tests against pretrained
    checkpoints.

    NOTE(review): `_lowercase` in the base list and the duplicated `a`
    attributes are renaming-pass artifacts; `UpperCamelCase__` is read unbound
    in several methods (presumably `torch_device` / `output_loading_info=True`).
    """
    a = UNetaDModel
    a = """sample"""

    @property
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Dict=(32, 32) ):
        # Dummy (noise, timestep) batch; the spatial size is a parameter here
        # so the slow tests below can reuse it at 256x256.
        lowerCamelCase__ : Dict = 4
        lowerCamelCase__ : Tuple = 3
        lowerCamelCase__ : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowerCamelCase_ ( self: int ):
        # Expected input shape (channels, height, width).
        return (3, 32, 32)

    @property
    def lowerCamelCase_ ( self: Dict ):
        # Expected output shape (channels, height, width).
        return (3, 32, 32)

    def lowerCamelCase_ ( self: str ):
        # Init kwargs plus dummy inputs consumed by the shared tester mixins.
        lowerCamelCase__ : List[Any] = {
            """block_out_channels""": [32, 64, 64, 64],
            """in_channels""": 3,
            """layers_per_block""": 1,
            """out_channels""": 3,
            """time_embedding_type""": """fourier""",
            """norm_eps""": 1e-6,
            """mid_block_scale_factor""": math.sqrt(2.0 ),
            """norm_num_groups""": None,
            """down_block_types""": [
                """SkipDownBlock2D""",
                """AttnSkipDownBlock2D""",
                """SkipDownBlock2D""",
                """SkipDownBlock2D""",
            ],
            """up_block_types""": [
                """SkipUpBlock2D""",
                """SkipUpBlock2D""",
                """AttnSkipUpBlock2D""",
                """SkipUpBlock2D""",
            ],
        }
        lowerCamelCase__ : int = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def lowerCamelCase_ ( self: List[Any] ):
        # The 256px CelebA-HQ checkpoint loads without missing keys and runs.
        lowerCamelCase__ : str = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = self.dummy_input
        lowerCamelCase__ : List[Any] = floats_tensor((4, 3) + (256, 256) ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = noise
        lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase__ )
        assert image is not None, "Make sure output is not None"

    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        # Numerical regression of the CelebA-HQ checkpoint at sigma 1e-4 on an
        # all-ones input, against hard-coded reference values.
        lowerCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = 4
        lowerCamelCase__ : str = 3
        lowerCamelCase__ : Optional[int] = (256, 256)
        lowerCamelCase__ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
        with torch.no_grad():
            lowerCamelCase__ : Tuple = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        lowerCamelCase__ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        lowerCamelCase__ : int = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )

    def lowerCamelCase_ ( self: Tuple ):
        # Same regression shape on the small dummy FFHQ checkpoint at 32x32.
        lowerCamelCase__ : Dict = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : int = 4
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : Any = (32, 32)
        lowerCamelCase__ : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
        with torch.no_grad():
            lowerCamelCase__ : List[str] = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        lowerCamelCase__ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        lowerCamelCase__ : Any = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # not required for this model
        pass
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger handle.
_A : Union[str, Any] =logging.get_logger(__name__)
# Config-URL map for the released AST checkpoint.
# NOTE(review): this assignment clobbers the logger above — both globals were
# collapsed onto the single name `_A` by an automated renaming pass.
_A : List[str] ={
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for the Audio Spectrogram Transformer (AST).

    Stores the encoder hyper-parameters and the spectrogram patch-embedding
    parameters read back by the model implementation.

    Fixes over the previous revision:
    * the class listed itself (``_lowercase``) as its own base — a NameError
      at class-creation time; the imported-but-otherwise-unused
      ``PretrainedConfig`` is the evident intended base;
    * every ``__init__`` parameter was declared as ``UpperCamelCase__``
      (duplicate parameter names are a SyntaxError) while the body read the
      conventional names restored below;
    * the model identifier is exposed as ``model_type`` — the attribute the
      auto-config machinery reads — instead of the meaningless ``a``.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer encoder geometry.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Spectrogram patch-embedding parameters.
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_A : str =None
_A : str =logging.get_logger(__name__)
_A : Any ={'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_A : List[str] ={
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
_A : Any ={
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
_A : Optional[Any] ='''▁'''
class _lowercase ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) FNet tokenizer based on a sentencepiece vocab.

    Fixes over the previous revision:
    * the class inherited from itself (``_lowercase``) — a NameError; the
      imported ``PreTrainedTokenizerFast`` is the evident intended base;
    * the five class attributes all bound the name ``a`` (each clobbering the
      last); they are restored to the attribute names the tokenizer base class
      reads;
    * the three methods all bound the name ``lowerCamelCase_`` (only the last
      survived); they are restored to the API method names the base class
      dispatches to;
    * ``__init__`` declared every parameter as ``UpperCamelCase__``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        # NOTE(review): the lstrip/rstrip/normalized flags were garbled in the
        # previous revision; True/False/False follows the upstream FNet
        # tokenizer — confirm against the reference implementation.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Return segment ids: 0 for the first sequence (incl. its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece vocab file into ``save_directory``; returns its path."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Only copy when the target differs from the file we already hold.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Root of the examples tree whose scripts embed a minimum-version check.
PATH_TO_EXAMPLES = "examples/"

# For each target kind: (regex locating the version string, replacement
# template containing the literal `VERSION` placeholder).
# The previous revision bound all four globals below to the single throwaway
# name `_A` (each assignment clobbering the last), while the functions further
# down read `REPLACE_PATTERNS` / `REPLACE_FILES` by name — a NameError.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}

# Files rewritten on every release, keyed by the pattern that applies to them.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}

README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string embedded in `fname`.

    Uses the (regex, template) pair registered in ``REPLACE_PATTERNS`` under
    `pattern`, substituting `version` for the ``VERSION`` placeholder.

    Renamed from the garbled ``SCREAMING_SNAKE_CASE_`` (callers in this file
    use this name) and its three identically named parameters — a SyntaxError —
    restored to the names the body reads.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Propagate `version` into every example script's ``check_min_version`` call.

    Renamed from the garbled ``SCREAMING_SNAKE_CASE_`` (the caller in this file
    uses this name).  The previous revision walked its own argument instead of
    the examples root.
    """
    # Hard-coded here so the function stands alone; the module-level constant
    # for this path was lost to the automated renaming pass.
    examples_dir = "examples/"
    for folder, directories, fnames in os.walk(examples_dir):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is tracked.

    Skips the examples tree for patch releases.  Renamed from the garbled
    ``SCREAMING_SNAKE_CASE_`` (callers use this name); the two parameters were
    both declared as ``UpperCamelCase`` — a SyntaxError.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point model-doc links in the README at the stable docs instead of `main`.

    Renamed from the garbled ``SCREAMING_SNAKE_CASE_`` (callers use this name).
    The previous revision assigned the prompts to throwaway locals but read
    them back as ``_start_prompt`` / ``_end_prompt`` (NameErrors), and opened
    an undefined name instead of the README path.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    # Inlined so the function stands alone; the module-level README constant
    # was lost to the automated renaming pass.
    readme_file = "README.md"
    with open(readme_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(readme_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the main ``__init__.py``.

    Returns a ``packaging`` Version object.  Renamed from the garbled
    ``SCREAMING_SNAKE_CASE_`` — the release/dev helpers below call
    ``get_version()``.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps.

    Computes the release version (asking the user to confirm), rewrites it
    everywhere, and — except for patch releases — cleans the README model
    list.  Renamed from the garbled ``SCREAMING_SNAKE_CASE_``: the ``__main__``
    block calls ``pre_release_work(patch=...)``, which also requires the
    parameter name restored here.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps.

    Moves the repository to the next ``.dev0`` version (asking the user to
    confirm) and cleans the README model list.  Renamed from the garbled
    ``SCREAMING_SNAKE_CASE_`` — the ``__main__`` block calls this name.
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # The previous revision bound the parser and the parsed namespace to the
    # throwaway name `_A` but read them back as `parser` / `args` — NameErrors.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 631 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module logger handle.
_A : Dict =logging.get_logger(__name__)
# Config-URL map for the released T5 checkpoints.
# NOTE(review): this assignment clobbers the logger above — both globals were
# collapsed onto the single name `_A` by an automated renaming pass.
_A : Optional[int] ={
    '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
    '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
    '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
    '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
    '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for T5 encoder-decoder models.

    Fixes over the previous revision:
    * the class inherited from itself (``_lowercase``) — a NameError; the
      imported-but-otherwise-unused ``PretrainedConfig`` is the evident
      intended base;
    * the three class attributes all bound the name ``a`` (each clobbering
      the last); restored to the attribute names the config machinery reads;
    * every ``__init__`` parameter was declared as ``UpperCamelCase__``
      (duplicate parameter names are a SyntaxError) while the body read the
      conventional names restored below.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # `feed_forward_proj` is either "{ACT_FN}" or "gated-{ACT_FN}".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class _lowercase ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for T5.

    Fixes over the previous revision:
    * the base was ``_lowercase`` — i.e. the config class defined just above —
      although the properties below rely on ``use_past`` and
      ``fill_with_past_key_values_``, which the imported (and otherwise
      unused) ``OnnxSeqaSeqConfigWithPast`` provides;
    * both properties bound the same name ``lowerCamelCase_`` (the second
      clobbered the first); restored to the property names the ONNX export
      machinery reads.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exporter's input tensors."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With a cache, the decoder only receives the newest token.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required to export T5."""
        return 13
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# NOTE(review): flag bound to the throwaway name `_A`; nothing in the visible
# file reads it back, so its original purpose can't be confirmed from here.
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that a short training loop with a DDPM scheduler and one with a
    DDIM scheduler produce (near-)identical losses on identical seeded data.

    NOTE(review): `UpperCamelCase__` is read unbound throughout — before the
    automated renaming pass those uses presumably stood for the device string,
    `clip_sample=False`, and the two per-scheduler loss tensors compared at
    the end; verify against the original test file.
    """

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ):
        # Fresh seeded model + SGD optimizer for one training run.
        set_seed(0 )
        lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Four training steps with shared seeded batches, once per scheduler.
        lowerCamelCase__ : Optional[Any] = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        lowerCamelCase__ : List[Any] = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        lowerCamelCase__ : Any = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        lowerCamelCase__ , lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # both schedulers should have produced (near-)identical noisy samples.
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Public import structure consumed by `_LazyModule` below.  The previous
# revision bound this dict — and the later vision/torch symbol lists — to a
# throwaway `_A` name, so `_import_structure` was undefined at the bottom of
# the file and the optional symbol lists were silently discarded instead of
# registered.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Simulate Highest Response Ratio Next (HRRN) scheduling.

    Args:
        process_name: display names of the processes.
        arrival_time: arrival time of each process.
        burst_time: CPU burst length of each process.
        no_of_process: number of processes.

    Returns:
        Turn-around time of each process, ordered by arrival time.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Per-process flag: 0 = not yet executed, 1 = already executed.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort all three lists by arrival time (argsort computed once, not twice).
    order = np.argsort(arrival_time)
    burst_time = [burst_time[i] for i in order]
    process_name = [process_name[i] for i in order]
    arrival_time = sorted(arrival_time)  # sorted copy: do not mutate the caller's list

    while no_of_process > finished_process_count:
        # Find the first unfinished process; if the CPU would be idle until it
        # arrives, fast-forward the clock to its arrival time.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the winner to completion and calculate its turn around time.
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Derive per-process waiting time: turn-around time minus burst time.

    ``process_name`` is unused; it is kept to mirror the scheduler's signature.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run: five processes with staggered arrivals and growing bursts.
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 631 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Return ``{"started_at", "completed_at", "duration"}`` for one CI job.

    ``duration`` is the wall-clock length in minutes, rounded to the nearest
    integer. ``job`` is a GitHub Actions job payload — assumes ISO-8601-ish
    timestamps parseable by ``dateutil``.
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch ``{job name: timing info}`` for every job of a workflow run.

    Pages through the GitHub API 100 jobs at a time. On any failure the
    traceback is printed and ``{}`` is returned instead of raising.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # First page already holds 100 jobs; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # Report the longest jobs first.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
"""Conditional public exports for the VQ-Diffusion pipeline."""
from ...utils import is_torch_available, is_transformers_available


# The pipeline needs both `transformers` and `torch`; only expose it when the
# two optional dependencies are importable. NOTE(review): presumably the
# package-level __init__ substitutes dummy objects when this guard fails —
# confirm against the parent package.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 0 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read boolean flag ``key`` from the environment.

    Returns ``default`` when the variable is unset; otherwise returns 1/0 for
    the usual truthy/falsy spellings ("yes"/"no", "t"/"f", "on"/"off",
    "1"/"0") and raises ``ValueError`` for anything else.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to True or False. The accepted spellings mirror
    # the deprecated `distutils.util.strtobool` (removed in Python 3.12),
    # which this replaces.
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f"If set, {key} must be yes or no.")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that unconditionally skips a test."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    """Decorator marking a test that must run on CPU only (no CUDA device)."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    """Decorator marking a test that requires a CUDA device."""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    """Decorator marking a test that requires an XPU device."""
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    """Decorator marking a test that requires the torch `mps` backend."""
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    """Decorator requiring both `transformers` and `datasets`."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    """Decorator requiring the bitsandbytes library."""
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    """Decorator requiring a TPU."""
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    """Decorator requiring exactly one CUDA device."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    """Decorator requiring exactly one XPU device."""
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    """Decorator requiring more than one CUDA device."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    """Decorator requiring more than one XPU device."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    """Decorator requiring the safetensors library."""
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    """Decorator requiring DeepSpeed."""
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    """Decorator requiring torch >= 1.12.0 (FSDP support)."""
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Decorator usable bare or as ``@require_torch_min_version(version=...)``.

    Skips the test unless the installed torch is at least ``version``.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    """Decorator requiring tensorboard."""
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    """Decorator requiring wandb."""
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    """Decorator requiring comet_ml."""
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


# True when at least one non-comet tracker is usable (comet_ml hijacks the
# tracker selection when installed, so it must be absent).
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    """Decorator requiring a usable experiment tracker and no comet_ml."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """TestCase owning a class-level temporary directory.

    The directory is created once per class, emptied before each test (unless
    a subclass sets ``clear_on_setup = False``) and removed when the class is
    torn down.
    """

    # Whether setUp should wipe the directory's contents before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """TestCase that resets the accelerate state singletons after each test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """TestCase that starts a set of mocks and stops them automatically."""

    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        # Accept a single mock or a sequence of mocks.
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Return True iff ``tensor`` holds the same values on every process.

    Gathers a copy of the tensor across the distributed group and compares
    each gathered slice against the local one.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _lowercase :
def __init__( self: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : Any = returncode
lowerCamelCase__ : Tuple = stdout
lowerCamelCase__ : Union[str, Any] = stderr
async def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Any:
while True:
lowerCamelCase__ : str = await stream.readline()
if line:
callback(UpperCamelCase )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run ``cmd`` asynchronously, teeing its stdout/stderr line by line.

    Each line is captured into a list and, unless ``quiet``, echoed to the
    matching local stream. Returns a ``_RunOutput`` with the return code and
    the captured (decoded, rstripped) lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Blocking wrapper around ``_stream_subprocess``.

    Raises ``RuntimeError`` with the combined stderr when the command exits
    with a non-zero return code; otherwise returns the ``_RunOutput``.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    """Raised by ``run_command`` when the spawned command exits non-zero."""
def run_command(command, return_stdout=False):
    """Run ``command`` with ``subprocess.check_output``.

    Returns the decoded stdout when ``return_stdout`` is True (None
    otherwise) and wraps any ``CalledProcessError`` in a
    ``SubprocessCallException`` carrying the captured output.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """Builds tiny ViTMAE configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # decoder reconstructs patch_size**2 * num_channels values per patch
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """Load the standard COCO fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking the real checkpoint's output logits."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 631 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable (flax struct) runtime state of the Flax DDPM scheduler."""

    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    """Output of a DDPM step: inherits ``prev_sample`` and carries the state."""

    state: DDPMSchedulerState
class _lowercase ( _lowercase , _lowercase ):
a = [e.name for e in FlaxKarrasDiffusionSchedulers]
a = 42
@property
def lowerCamelCase_ ( self: Tuple ):
return True
@register_to_config
def __init__( self: Tuple , UpperCamelCase__: int = 1_000 , UpperCamelCase__: float = 0.0_001 , UpperCamelCase__: float = 0.02 , UpperCamelCase__: str = "linear" , UpperCamelCase__: Optional[jnp.ndarray] = None , UpperCamelCase__: str = "fixed_small" , UpperCamelCase__: bool = True , UpperCamelCase__: str = "epsilon" , UpperCamelCase__: jnp.dtype = jnp.floataa , ):
lowerCamelCase__ : Optional[Any] = dtype
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Optional[CommonSchedulerState] = None ):
if common is None:
lowerCamelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase__ : List[Any] = jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ , )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: Optional[int] = None ):
return sample
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: int , UpperCamelCase__: Tuple = () ):
lowerCamelCase__ : int = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase__ : Optional[int] = (jnp.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ , )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any=None , UpperCamelCase__: Union[str, Any]=None ):
lowerCamelCase__ : List[Any] = state.common.alphas_cumprod[t]
lowerCamelCase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase__ : Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase__ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase__ : str = jnp.clip(UpperCamelCase__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase__ : str = jnp.log(jnp.clip(UpperCamelCase__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase__ : Dict = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase__ : Any = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase__ : List[str] = variance
lowerCamelCase__ : Union[str, Any] = state.common.betas[t]
lowerCamelCase__ : List[Any] = (predicted_variance + 1) / 2
lowerCamelCase__ : Tuple = frac * max_log + (1 - frac) * min_log
return variance
def step(self, state, model_output, timestep, sample, key=None, return_dict=True):
    """Predict the sample at the previous timestep by reversing the diffusion SDE.

    Args:
        state: current scheduler state (holds the shared alphas/betas schedules).
        model_output: direct output from the learned diffusion model.
        timestep: current discrete timestep in the diffusion chain.
        sample: current instance of the sample being denoised.
        key: PRNG key for the variance noise; defaults to ``jax.random.PRNGKey(0)``.
        return_dict: if False, return a ``(prev_sample, state)`` tuple.

    Returns:
        ``FlaxDDPMSchedulerOutput`` (or a tuple) with the previous sample and the state.
    """
    t = timestep

    if key is None:
        key = jax.random.PRNGKey(0)

    # models with learned variance emit 2x channels: split into mean / variance parts
    if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = state.common.alphas_cumprod[t]
    alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if self.config.prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif self.config.prediction_type == "sample":
        pred_original_sample = model_output
    elif self.config.prediction_type == "v_prediction":
        pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
    else:
        raise ValueError(
            f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
            " for the FlaxDDPMScheduler."
        )

    # 3. Clip "predicted x_0"
    if self.config.clip_sample:
        pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
    current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise (note: jnp.where evaluates both branches, so this also runs at t == 0,
    # but its result is discarded there)
    def random_variance():
        split_key = jax.random.split(key, num=1)
        noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
        return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

    variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample, state)

    return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
def add_noise(self, state, original_samples, noise, timesteps):
    """Forward-diffuse ``original_samples`` with ``noise`` at the given ``timesteps``.

    Delegates to the shared ``add_noise_common`` helper using the schedules in ``state.common``.
    """
    return add_noise_common(state.common, original_samples, noise, timesteps)
def get_velocity(self, state, sample, noise, timesteps):
    """Compute the velocity target used for "v_prediction" training.

    Delegates to the shared ``get_velocity_common`` helper using the schedules in ``state.common``.
    """
    return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self: int ):
return self.config.num_train_timesteps
# --- (removed stray dataset-artifact separator row)
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem containing a single file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        """
        Args:
            fo: path/url of the compressed file (may be chained, e.g. ``gzip://file::http://...``).
            target_protocol: protocol of the underlying filesystem holding the compressed file.
            target_options: options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # name of the single inner file: the compressed name with its extension stripped
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate ``dir_cache`` with the single (virtual) file entry, lazily."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        """Return the full decompressed content as bytes."""
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedfileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedfileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedfileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedfileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedfileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy that forwards everything to the wrapped file object."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
# --- (removed stray dataset-artifact separator row)
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """A single element of a singly linked list."""

    def __init__(self, data: T):
        self.data = data  # payload stored in this node
        self.next: Node[T] | None = None  # link to the node below on the stack

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list.

    >>> stack = LinkedStack()
    >>> stack.is_empty()
    True
    >>> stack.push(5)
    >>> stack.push(9)
    >>> stack.pop()
    9
    """

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self):
        # walk from the top of the stack down the chain of nodes
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        """Return True if the stack holds no elements."""
        return self.top is None

    def push(self, item: T) -> None:
        """Place ``item`` on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top element; raise IndexError if empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top element without removing it; raise IndexError if empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every element from the stack."""
        self.top = None
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
# --- (removed stray dataset-artifact separator row)
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; used below when reporting yaml parsing failures.
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original MobileViTV2 yaml config and flatten it onto a Namespace.

    Nested yaml mappings become dot-separated attribute names, e.g.
    ``model.classification.name``.
    """
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a ``MobileViTVaConfig`` for ``task_name`` from the original yaml config.

    Sets label count / image size per dataset, copies width multiplier and activation
    from the original config, and fills id2label/label2id from the HF label files.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new``, in place."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Map original MobileViTV2 checkpoint keys to HF ``MobileViTVa*`` model keys.

    Args:
        state_dict: original checkpoint dict (only its keys are inspected).
        base_model: if True, omit the ``mobilevitv2.`` prefix on backbone keys.

    Returns:
        List of ``(old_key, new_key)`` tuples.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # drop a leading "encoder." if present
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # NOTE: conditions test the ORIGINAL key `k`, replacements act on `k_new`
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            # number of transformer sub-layers differs per stage
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head weights (unused in the HF model) from ``state_dict``, in place."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO cats image used to sanity-check converted models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/rename the original MobileViTV2 weights into the HF model and save it.

    Args:
        task_name: which pretrained variant is being converted (see argparse choices).
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original yaml config.
        pytorch_dump_folder_path: output directory for the HF model + image processor.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
        """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
# --- (removed stray dataset-artifact separator row)
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """A distribution shifted by ``loc`` and scaled by ``scale``: y = loc + scale * x."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        # default to the identity transform when loc/scale are omitted
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project features onto one linear head per distribution argument, then map into the domain.

    Args:
        in_features: size of the incoming feature dimension.
        args_dim: mapping from distribution-argument name to its output dimension.
        domain_map: callable constraining the raw projections to each argument's domain.
    """

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear projection per distribution argument, in args_dim order
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable so it can live inside an ``nn.Module`` graph."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class that constructs a parametric distribution from projected network outputs.

    Subclasses set ``distribution_class``/``args_dim`` and implement ``domain_map``.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # scale every argument's dimension by the output dimensionality
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            # treat the last `dim` axis as a single multivariate event
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        """Build the output distribution, optionally affinely rescaled by loc/scale."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event contemplated by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of ``event_shape``."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the distribution's support, used to pad invalid/padded entries."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution arguments."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        """Constrain raw projections to each argument's domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # smooth map from R to R+: https://twitter.com/jon_barron/status/1387167648669048833
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output: df > 2 (finite variance), scale > 0."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; df shifted above 2 so the variance exists
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal distribution output: scale constrained to be strictly positive."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output, parameterized by total_count and logits."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be positive; logits are unconstrained
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma: scaling is folded into the logits.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """Config tester that additionally checks the MobileViTV2-specific attribute."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class _lowercase :
def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Any = conv_kernel_size
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[Any] = scope
lowerCamelCase__ : Tuple = width_multiplier
lowerCamelCase__ : List[Any] = ffn_dropout
lowerCamelCase__ : Any = attn_dropout
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_(self):
    """Split `prepare_config_and_inputs()` into (config, inputs_dict) for the common tests.

    NOTE(review): the original unpacked into four throwaway `lowerCamelCase__` locals and
    then read the undefined names `config_and_inputs`/`pixel_values`; intended names restored.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels, pixel_labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
# Common model/pipeline test suite for the MobileViTV2 PyTorch models.
# NOTE(review): this class is heavily damaged by an automated rename — every class
# attribute is bound to the same name `a` (each assignment silently overrides the
# previous), every test method is named `lowerCamelCase_` (only the last def with a
# given signature survives class creation), and locals are bound to `lowerCamelCase__`
# while later statements read names such as `config`, `model`, `signature`,
# `arg_names` (NameError at runtime).  The inline notes below record apparent intent;
# the code itself is kept byte-identical.
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
# presumably all_model_classes — TODO confirm against the original test file
a = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
# presumably pipeline_model_mapping — TODO confirm
a = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
# four boolean test-capability flags (e.g. attention/head-masking toggles) — names lost
a = False
a = False
a = False
a = False
# setUp: builds the model tester and the config tester for the common tests.
def lowerCamelCase_ ( self: Optional[int] ):
# NOTE(review): results are bound to dead locals instead of self.model_tester /
# self.config_tester, and `config_class=UpperCamelCase__` reads an undefined name
# (presumably MobileViTVaConfig).
lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
# runs the shared config sanity checks
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Tuple ):
pass
# checks every model's forward signature starts with `pixel_values`
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
# delegates to the model tester's create_and_check_model
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
# verifies hidden-state count and per-stage spatial downsampling
def lowerCamelCase_ ( self: List[str] ):
def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
# NOTE(review): this inner def repeats one parameter name three times — SyntaxError.
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs.hidden_states
# MobileViTV2 exposes 5 hidden-state feature maps
lowerCamelCase__ : List[Any] = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ : int = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# classification head shape check (delegated to the model tester)
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
# segmentation head shape check (delegated to the model tester)
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
# smoke-loads the first pretrained checkpoint from the hub
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_() -> "Image.Image":
    """Load the fixture COCO image used by the integration tests below.

    NOTE(review): the original bound the loaded image to a throwaway local, returned the
    undefined name `image`, and was annotated ``-> Optional[int]``; all fixed here.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : int = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
lowerCamelCase__ : str = outputs.logits
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Any = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
lowerCamelCase__ : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
lowerCamelCase__ : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase(unittest.TestCase):
    """Slow integration test: run `jplu/tf-camembert-base` and compare an output slice."""

    @slow
    def lowerCamelCase_(self):
        # NOTE(review): the original bound every intermediate to `lowerCamelCase__` and then
        # read `model`/`output`/`expected_slice` (NameError); intended bindings restored.
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,  # original read the mangled `tf.intaa`; int32 assumed — TODO confirm
        )  # J'aime le camembert !"
        output = model(input_ids)["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]],
            dtype=tf.float32,  # original read the mangled `tf.floataa`
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger for this tokenizer module.
# NOTE(review): every module constant below is bound to the same name `_A` (automated
# rename artifact) — each assignment clobbers the previous one, and code elsewhere that
# expects names such as VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP will not resolve.
_A : Optional[Any] =logging.get_logger(__name__)
# Canonical file names for the serialized tokenizer assets.
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub download locations for the pretrained tokenizer.json file(s).
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
# Maximum model input size per checkpoint (presumably positional-embedding capacity).
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
class _lowercase(PreTrainedTokenizerFast):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    NOTE(review): the original subclassed the then-undefined name `_lowercase`, declared
    duplicate `UpperCamelCase__` parameters (SyntaxError), and bound intermediates to dead
    locals while reading `pre_tok_state`/`pre_tok_class` (NameError).  The imported
    `PreTrainedTokenizerFast` base and the intended bindings are restored here.  The four
    class attributes all named `a` (each overriding the previous) are kept as-is but are
    another rename artifact — presumably vocab_files_names / pretrained_vocab_files_map /
    max_model_input_sizes / model_input_names.
    """

    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer when its serialized add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def lowerCamelCase_(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the tokenizer model files into `save_directory`; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)  # original returned the undefined `UpperCamelCase__`

    # NOTE(review): same mangled name as the method above, so this def shadows it.
    def lowerCamelCase_(self, conversation: "Conversation"):
        """Concatenate all conversation turns (each terminated by EOS), truncating on the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
# Unit tests for BlipProcessor (image processor + tokenizer wrapper).
# NOTE(review): automated-rename damage throughout — every method is named
# `lowerCamelCase_` (later defs shadow earlier ones, so unittest would only see the
# last), and many statements bind results to `lowerCamelCase__` while later lines read
# the intended names (`processor`, `tokenizer_add_kwargs`, `input_feat_extract`,
# `encoded_tok`, `inputs`, ... → NameError).  Code kept byte-identical; notes mark intent.
@require_vision
class _lowercase ( unittest.TestCase ):
# setUp: builds a BlipProcessor from a tiny tokenizer + image processor and saves it
def lowerCamelCase_ ( self: Tuple ):
# NOTE(review): tmpdir/tokenizer/processor are bound to dead locals; `processor` and
# `self.tmpdirname` are then read without ever being assigned.
lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
lowerCamelCase__ : int = BlipImageProcessor()
lowerCamelCase__ : Tuple = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCamelCase__ : List[Any] = BlipProcessor(UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
# helper: reload the saved processor and return its tokenizer
def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: str ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
# helper: reload the saved processor and return its image processor
def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase__: Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
# tearDown: remove the temp directory
def lowerCamelCase_ ( self: List[str] ):
shutil.rmtree(self.tmpdirname )
# helper: one random 3x30x400 uint8 image wrapped as a PIL Image
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
# NOTE(review): `np.uinta` is a mangled dtype name — presumably np.uint8.
lowerCamelCase__ : Optional[int] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
# save_pretrained / from_pretrained round-trip with extra kwargs
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : int = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Any = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
# processor(images=...) must match the raw image processor output
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.get_image_processor()
lowerCamelCase__ : int = self.get_tokenizer()
lowerCamelCase__ : int = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[Any] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Dict = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
# processor(text=...) must match the raw tokenizer output
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : str = """lower newer"""
lowerCamelCase__ : List[str] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
# processor(text=..., images=...) returns the combined key set; no input raises
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[Any] = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : str = """lower newer"""
lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
# batch_decode delegates to the tokenizer
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : Tuple = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Optional[int] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Tuple = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# model input names: pixel_values + input_ids + attention_mask only
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : List[Any] = self.get_image_processor()
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : Tuple = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : str = """lower newer"""
lowerCamelCase__ : Tuple = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 712 |
'''simple docstring'''
import sys
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import wiring for the GIT model package: maps submodule name -> exported names.
# NOTE(review): the structure is bound to `_A` (rename artifact) but consumed below as
# `_import_structure`, which is never defined — the two names must be reconciled for the
# lazy module to work.
_A : Dict ={
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
# Register the torch-only modeling exports when torch is installed.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
# NOTE(review): should extend `_import_structure['modeling_git']`; bound to `_A` instead.
_A : Union[str, Any] =[
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
# At runtime, replace this module with a lazy loader.
else:
import sys
_A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_(torch_layer, weight, bias=None) -> None:
    """Copy `weight` (and optionally `bias`) into `torch_layer`, asserting shapes match.

    NOTE(review): the original declared three parameters all named `UpperCamelCase`
    (SyntaxError) and bound the new Parameters to a dead local instead of assigning the
    layer attributes; intended behavior restored.  Call sites elsewhere in this file refer
    to this helper as `set_param` — its `def` name was mangled and should be reconciled.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias)
def SCREAMING_SNAKE_CASE_(weights, torch_layer, hidden_size) -> None:
    """Load trax LSH self-attention weights [query_key, value, out_dense] into `torch_layer`.

    NOTE(review): duplicate parameter names (SyntaxError) and dead locals fixed.  The body
    calls `set_param`, which is the intended name of the copy helper defined above (its
    `def` line was mangled to `SCREAMING_SNAKE_CASE_`).
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    # transpose then flatten the leading dimensions to 2-D before loading
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def SCREAMING_SNAKE_CASE_(weights, torch_layer, hidden_size) -> None:
    """Load trax local self-attention weights [query, key, value, out_dense] into `torch_layer`.

    NOTE(review): duplicate parameter names (SyntaxError) and dead locals fixed; calls the
    copy helper by its intended name `set_param` (its `def` line was mangled).
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def SCREAMING_SNAKE_CASE_(weights, torch_block, hidden_size) -> None:
    """Load one trax Reformer block (attention + feed-forward) into a PyTorch block.

    NOTE(review): duplicate parameter names (SyntaxError) and dead locals fixed; calls
    `set_param` / `set_layer_weights_in_torch_lsh` / `set_layer_weights_in_torch_local`,
    the intended names of the helpers above (their `def` lines were mangled).
    """
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0])
    layer_norm_a_bias = np.asarray(layer_norm_a[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_a_weight),
        torch.tensor(layer_norm_a_bias),
    )
    # lsh weights + output — a 3-element bundle means LSH attention, 4 means local
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_a_weight),
        torch.tensor(layer_norm_a_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def SCREAMING_SNAKE_CASE_(weights, torch_model, hidden_size) -> None:
    """Load all trax Reformer weights into a PyTorch `ReformerModelWithLMHead`.

    NOTE(review): duplicate parameter names (SyntaxError) and dead locals fixed; the
    `isinstance(weights[3], UpperCamelCase)` test read an undefined name — `tuple` is
    assumed here (axial position embeddings are stored as a tuple) — TODO confirm.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # Each PyTorch block consumes four consecutive trax weight entries.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    out_norm_weight = np.asarray(weights[7][0])
    out_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(out_norm_weight),
        torch.tensor(out_norm_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def SCREAMING_SNAKE_CASE_(trax_model_pkl_path, config_file, pytorch_dump_path) -> None:
    """Convert a pickled trax Reformer checkpoint into a saved PyTorch state dict.

    NOTE(review): duplicate parameter names (SyntaxError) and dead locals fixed; calls
    `set_model_weights_in_torch`, the intended name of the loader above.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = ReformerModelWithLMHead(config)
    # NOTE(review): pickle.load executes arbitrary code on load — only run this on
    # checkpoints from a trusted source.
    with open(trax_model_pkl_path, """rb""") as f:
        model_weights = pickle.load(f)["""weights"""]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to throwaway `_A`
    # names and then read the undefined `parser`/`args`; intended bindings restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained Reformer model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # NOTE(review): this call uses the conversion function's intended name; its `def`
    # line above was mangled to `SCREAMING_SNAKE_CASE_` and must be reconciled.
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Shared sentencepiece fixture used to build the test tokenizer.
# NOTE(review): all module constants below are bound to the same name `_A` (automated
# rename artifact); each assignment clobbers the previous one, and later code expecting
# names such as SAMPLE_VOCAB / ORG_NAME / FRAMEWORK will not resolve.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
# tokenizer-config fixture: translate en -> fi
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
# a Marian language-code control token
_A : int ='''>>zh<<'''
# hub organization prefix for Helsinki-NLP checkpoints
_A : Dict ='''Helsinki-NLP/'''
# Pick the tensor framework used for batch encoding: torch > tf > jax.
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Optional[int] ='''tf'''
else:
_A : Dict ='''jax'''
# MarianTokenizer test suite (sentencepiece-backed translation tokenizer).
# NOTE(review): automated-rename damage throughout — class attributes all named `a`
# (later assignments override earlier), every test method named `lowerCamelCase_`
# (later defs shadow earlier ones), and many statements bind to `lowerCamelCase__`
# while later lines read the intended names (`tokenizer`, `save_dir`, `vocab_keys`,
# `en_de_tokenizer`, `batch`, `tok`, ... → NameError).  Code kept byte-identical.
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
# presumably tokenizer_class / test_rust_tokenizer / test_sentencepiece flags
a = MarianTokenizer
a = False
a = True
# setUp: writes a tiny vocab + sentencepiece model into tmpdirname and round-trips it
def lowerCamelCase_ ( self: List[str] ):
super().setUp()
lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
# helper: reload the tokenizer saved in setUp
def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
# helper: (input, expected output) pair for the common tokenizer tests
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
return (
"This is a test",
"This is a test",
)
# </s> must map to id 0 and back
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = """</s>"""
lowerCamelCase__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
# vocab ordering and size from the fixture
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def lowerCamelCase_ ( self: int ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
# en-de hub checkpoint: encode, save, and reload round-trip
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
# very long input must be truncated to the model max length (512)
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
# short inputs are padded only to the longest sequence in the batch
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
# full integration fixture against the pinned en-de checkpoint revision
@slow
def lowerCamelCase_ ( self: List[str] ):
# fmt: off
lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
# separate source/target vocab checkpoint: text vs text_target and decoding
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
lowerCamelCase__ : str = """Tämä on testi"""
lowerCamelCase__ : Any = """This is a test"""
lowerCamelCase__ : int = [76, 7, 2_047, 2]
lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _lowercase :
    """A batched, differentiable pinhole camera (Shap-E style).

    Each batch element has an origin and an orthonormal-ish (x, y, z) frame;
    rays are generated per pixel of a width x height image.

    NOTE(review): field and method names were reconstructed from an obfuscated
    original whose fields were all ``a = 42`` and whose methods referenced
    names (``pixel_indices``, ``self.get_image_coords``, ``res``, ...) that
    were never defined.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # Invariants: one 3-vector per batch element for each axis and the origin.
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Image resolution as a float32 tensor [width, height]."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Field of view as a float32 tensor [x_fov, y_fov]."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self):
        """Integer (x, y) coordinates for every pixel, shape (height * width, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """All rays for the batch, shape (batch_size, inner_batch * H * W, 2, 3)."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Map pixel coords (batch, ..., 2) to (origin, direction) pairs (batch, ..., 2, 3)."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Normalized device coordinates in [-1, 1], scaled by tan(fov / 2).
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)

        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int):
        """Return a camera for a resized view; the aspect ratio must not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # required field; it was omitted in the original (latent TypeError)
        )


# Public alias: resize_image() above and create_pan_cameras() below refer to
# this name, which the obfuscated original left undefined.
DifferentiableProjectiveCamera = _lowercase
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> "DifferentiableProjectiveCamera":
    """Create a batch of 20 cameras panning in a circle around the origin.

    :param UpperCamelCase: image size (used for both width and height).

    NOTE(review): reconstructed from obfuscated code in which the per-step
    locals (``z``, ``origin``, ``x``, ``y``) had been collapsed into one name,
    leaving ``z /= ...`` and the ``append`` calls referencing undefined
    variables.
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        # Viewing direction: points inward with a slight downward tilt.
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=UpperCamelCase,
        height=UpperCamelCase,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_A : Optional[Any] =logging.get_logger(__name__)
# NOTE(review): `_A` is immediately rebound below, so the logger binding is
# lost — an artifact of the name obfuscation.
# Map of released RWKV checkpoints to their hosted config.json files.
_A : Optional[int] ={
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for RWKV models (hyper-parameter container only).

    NOTE(review): reconstructed from obfuscated code whose ``__init__`` gave
    every parameter the same name (a SyntaxError) while the body referenced
    the real names; the parameter list below is recovered from those body
    references and the original default values.
    """

    model_type = "rwkv"
    # PretrainedConfig lets callers use max_position_embeddings as an alias
    # for context_length.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to hidden_size-derived values when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 0 |
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 50 ) -> int:
    """Project Euler 114: count the ways to fill a row of ``UpperCamelCase``
    units with black squares and red blocks of length >= 3, where red blocks
    must be separated by at least one black square.

    Fixes the original body, which referenced an undefined name ``length``
    instead of its own parameter.

    :param UpperCamelCase: row length (default 50, the Project Euler input).
    :return: number of distinct fill patterns (the empty fill counts as one).
    """
    # ways_number[k] = number of fillings of a row of length k; a row shorter
    # than 3 admits only the all-black filling.
    ways_number = [1] * (UpperCamelCase + 1)
    for row_length in range(3, UpperCamelCase + 1):
        for block_length in range(3, row_length + 1):
            # Leftmost red block starts at block_start; one black separator
            # follows it, and the remainder is filled recursively.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]
            # The block flush against the right edge needs no separator.
            ways_number[row_length] += 1
    return ways_number[UpperCamelCase]


# Alias so the __main__ demo's `solution()` call resolves.
solution = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    # Demo: print the count for the default 50-unit row.
    # NOTE(review): relies on a module-level `solution` binding; the function
    # above is currently named SCREAMING_SNAKE_CASE_, so without an alias this
    # raises NameError.
    print(F'{solution() = }')
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_A : str =logging.get_logger(__name__)
# NOTE(review): `_A` is immediately rebound, losing the logger binding — an
# artifact of the name obfuscation.
# Map of released RoCBert checkpoints to their hosted config.json files.
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for RoCBert models (hyper-parameter container only).

    RoCBert augments BERT with pronunciation and shape embeddings; the extra
    ``enable_*`` / ``*_embed_dim`` / ``*_vocab_size`` fields configure them.

    NOTE(review): reconstructed from obfuscated code whose ``__init__`` gave
    every parameter the same name (a SyntaxError); the parameter list below
    is recovered from the attribute names assigned in the body and the
    original default values.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific multimodal embedding switches and sizes.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # NOTE(review): reconstructed from obfuscated code where every module-level
    # assignment was collapsed into `_A`, leaving `df`, `actual_data`,
    # `train_x`, etc. undefined at their later uses.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # window of past observations fed to the LSTM
    forward_days = 5  # prediction horizon per sample
    periods = 20  # number of look_back windows held out for testing
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    # Build sliding (look_back -> forward_days) windows for train and test.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa, pb) -> tuple[float, float]:
    """Return the midpoint of two 2-D points.

    Fixes the obfuscated original, which named both parameters identically
    (a SyntaxError) and averaged the first point with itself.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


# Backwards-compatible binding for the obfuscated name.
SCREAMING_SNAKE_CASE_ = get_mid
def triangle(vertexa, vertexb, vertexc, depth) -> None:
    """Draw a Sierpinski triangle recursively with the module-level turtle.

    Draws the outline of the triangle (vertexa -> vertexb -> vertexc ->
    vertexa), then recurses on the three corner sub-triangles until depth 0.

    NOTE(review): reconstructed from obfuscated code that named all four
    parameters identically (a SyntaxError) and called the undefined names
    `triangle`/`get_mid`; the midpoint pairing below is the standard
    one-vertex-plus-adjacent-edge-midpoints subdivision.
    Relies on a module-level `my_pen` turtle created in the __main__ guard.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


# Backwards-compatible binding for the obfuscated name.
SCREAMING_SNAKE_CASE_ = triangle
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # NOTE(review): the obfuscated original assigned the pen and vertices to
    # `_A`, leaving `my_pen` and `vertices` undefined below; restored here.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
    """Return the n-th ugly number (a number whose only prime factors are
    2, 3 and 5): 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, ...

    Fixes the obfuscated original, which collapsed the three independent
    multiplier indices into a single name, corrupting the classic
    three-pointer merge.

    :param UpperCamelCase: 1-based position in the ugly-number sequence
        (values <= 1 return the first ugly number, 1).
    """
    ugly_nums = [1]

    # One cursor per prime factor; each points at the smallest ugly number
    # not yet multiplied by that factor.
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, UpperCamelCase):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every cursor that produced next_num (deduplicates e.g. 6).
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


# Alias so the __main__ demo's `ugly_numbers(...)` call resolves.
ugly_numbers = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    # Run any doctests in this module, then print a demo value.
    from doctest import testmod

    testmod(verbose=True)
    # NOTE(review): relies on a module-level `ugly_numbers` binding; the
    # function above is currently named SCREAMING_SNAKE_CASE_, so without an
    # alias this raises NameError.
    print(F'{ugly_numbers(200) = }')
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    """Builds a tiny ConvBERT config plus random inputs and shape-checks every
    TF ConvBERT head (model tester used by the test class below).

    NOTE(review): reconstructed from obfuscated code in which every
    ``self.<attr> = ...`` write had been collapsed into an unused local and
    all constructor parameters shared one (syntactically invalid) name. The
    constructor deliberately overrides its keyword defaults with the fixed
    literals that were present in the original body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # Fixed small-model hyper-parameters (keyword arguments above are
        # intentionally ignored, as in the original body).
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels) built from random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both dict-style and list-style inputs.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        list_inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(list_inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Tile each input across the choice dimension.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the common-test machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backwards-compatible public name used by the test class' setUp below.
TFConvBertModelTester = _lowercase
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-test harness for the TF ConvBERT model family: config checks,
    per-head shape checks, saved-model export, and attention-output layout.

    NOTE(review): obfuscation artifacts throughout — ``TFConvBertModelTester``
    and ``UpperCamelCase__`` are unresolved names, the local variables are all
    called ``lowerCamelCase__`` (including syntactically invalid annotated
    tuple targets), and the method names that unittest would discover have
    been lost. Kept byte-identical; only comments added.
    """

    # Model classes exercised by the common tests (empty when TF is absent).
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline tests.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common test mixin.
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: str ):
        # setUp: build the model tester and the config tester.
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: List[str] ):
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Saved-model export: save with saved_model=True, reload via Keras, and
        # verify hidden-state / attention shapes survive the round trip.
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                # ConvBERT's head_ratio halves the effective attention head count.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        # Attention-output layout: checks output_attentions via call arguments
        # and via the config, and that attentions come last in the outputs.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )

        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )

            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )

            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    """Slow integration check: run the public YituTech/conv-bert-base
    checkpoint on a fixed input and compare the output shape and a 3x3
    slice of the hidden states against hard-coded reference values.

    NOTE(review): reconstructed from obfuscated code whose locals had been
    collapsed into one name, leaving `model` and `output` undefined.
    """

    @slow
    def lowerCamelCase_ ( self ):
        # Downloads a pretrained checkpoint — network-dependent (hence @slow).
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Hard-coded reference slice of the first 3x3 hidden-state values.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 631 | 0 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
# Emit INFO-level logs while converting checkpoints.
logging.set_verbosity_info()
# Module-level logger shared by the conversion helpers below.
_A : int =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ):
    """Load a MobileViTv2 YAML config file into a flat argparse.Namespace.

    Nested mappings are flattened with dotted keys (e.g. ``model.classification.name``).
    On a YAML parse error the failure is logged and an empty namespace returned.

    :param UpperCamelCase: path to the YAML config file.

    NOTE(review): reconstructed from obfuscated code in which the inner
    helper's parameters all shared one name (a SyntaxError) and the locals
    (`items`, `new_key`, `config`, `flat_cfg`) were undefined at their uses.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(UpperCamelCase, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(UpperCamelCase, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for ``task_name``.

    Dataset-dependent fields (num_labels, image_size, id2label file) are selected
    from the task name; architecture fields are copied from the original yaml
    config at ``orig_cfg_file``.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config: sanity-check the checkpoint type and carry over architecture settings
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label: label names fetched from the hub dataset repo
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Return a list of ``(old_key, new_key)`` pairs mapping original MobileViTV2
    checkpoint keys to HuggingFace model keys.

    Args:
        state_dict: original checkpoint state dict (only its keys are read).
        base_model: if True, omit the ``mobilevitv2.`` prefix (bare backbone).
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the original "encoder." prefix if present.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # Generic sub-module renames.
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        # Stem and the first two (purely convolutional) stages.
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        # Stages 3-5 contain a downsampling layer plus a MobileViT block.
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            # Number of transformer layers per stage.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            # The layer after the last transformer layer is the stage layernorm.
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        # Transformer internals.
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        # Heads.
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop keys from ``state_dict`` (in place) that the HF model has no equivalent for.

    Currently this is the auxiliary segmentation head (``seg_head.aux_head.*``).
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        # Default of None makes the pop a no-op if the key vanished already.
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO sanity-check image (two cats on a couch)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original MobileViTV2 checkpoint weights into our
    MobileViTVa structure, sanity-check the outputs, and save the result.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
            imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
            ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()

    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 718 |
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
# `rich` is an optional dependency: only re-export the rich helpers when it is installed.
if is_rich_available():
    from .utils import rich
| 631 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # MobileNetV1 configs must expose the tf_padding / depth_multiplier knobs.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """tf_padding"""))
        self.parent.assertTrue(hasattr(config, """depth_multiplier"""))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV1 configs/inputs and runs shape checks for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1_024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The real hidden size is scaled by the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1
    does not use input_ids/inputs_embeds, does not support attention outputs, etc.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="""MobileNetV1 does not output attentions""")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # MobileNetV1 exposes one hidden state per (depthwise/pointwise) conv stage.
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the local COCO fixture image used by the integration test."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1_739, -1.1_233, 3.1_205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    """
    Configuration class for the TrOCR decoder. Stores decoder hyper-parameters
    (vocab size, model dim, layer/head counts, dropouts, special-token ids).
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_A : Any =logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias of DonutImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True if ``graph`` is bipartite, else False.

    ``graph`` is an adjacency list mapping each vertex 0..n-1 to its neighbours.
    Vertices are 2-colored with a DFS; the graph is bipartite iff no edge
    connects two same-colored vertices.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color v with c and recurse with the opposite color (1 - c).
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # The graph may be disconnected: start a DFS from every unvisited vertex.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Verify no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 631 | 0 |
'''simple docstring'''
def check_bipartite_dfs(graph):
    """Return True if the adjacency-list ``graph`` (vertices 0..n-1) is bipartite."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # 2-color the component reachable from v, alternating colors along edges.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Bipartite iff no edge connects two same-colored endpoints.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that applies ``process(item, **params)`` lazily per item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """Iterable that applies ``infer(item, **params)`` to each item of ``loader``.

    If ``loader_batch_size`` is set, batched model outputs are unrolled so the
    iterator yields one (batch_size=1 shaped) element at a time.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping for unrolling batched outputs.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current element of the unrolled batch, shaped like batch_size=1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """Flattens an iterator of iterators: each ``infer(item)`` yields several results."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _lowercase ( _lowercase ):
    """Pack pipeline iterator.

    Regroups the flattened stream back into lists along the original
    ``process`` boundaries, using the required ``is_last`` flag carried by
    every item, so ``process`` and ``postprocess`` see the same grouping.

    Fixes over the previous revision: every stateful assignment
    (``is_last``, ``accumulator``, ``item``, ``processed``, ``first_tensor``,
    ``key``, ``observed_batch_size``, ``self._loader_batch_data``,
    ``self._loader_batch_index``, ``self.loader_batch_size``) was bound to a
    throwaway annotated local, making the method raise ``NameError`` on its
    first use of any of those names.
    """

    def __iter__( self ):
        self.iterator = iter(self.loader)
        return self

    def lowerCamelCase_ ( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        # First, drain any partially-unrolled batch left over from a previous call.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Try to infer the size of the batch from the first tensor
                # (or first dict value) in `processed`.
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                # Setting internal state to unwrap the batch item by item.
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                # We're not unrolling batches
                item = processed
                is_last = item.pop("""is_last""")
                accumulator.append(item)
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset wrapper exposing a single column ``key`` of the wrapped dataset.

    Fixes over the previous revision: ``__init__`` bound ``dataset`` and
    ``key`` to throwaway annotated locals so ``self.dataset`` / ``self.key``
    were never set, and ``__getitem__`` read an undefined name ``i`` instead
    of its index parameter.
    """

    def __init__( self, dataset: Dataset, key: str ):
        self.dataset = dataset
        self.key = key

    def __len__( self ):
        return len(self.dataset)

    def __getitem__( self, i ):
        # Project the requested column out of the i-th row.
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset wrapper pairing two columns as ``{"text": ..., "text_pair": ...}``.

    Fixes over the previous revision: both key parameters collapsed into one
    name ``keya`` (so the second key was lost), the attributes were bound to
    throwaway annotated locals instead of ``self``, and ``__getitem__`` read
    an undefined name ``i`` instead of its index parameter.
    """

    def __init__( self, dataset: Dataset, keya: str, keyb: str ):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__( self ):
        return len(self.dataset)

    def __getitem__( self, i ):
        # Shape matches what text-pair tokenizers/pipelines expect.
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 631 | 0 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tests for BertJapaneseTokenizer with the default wordpiece subword
    tokenizer, exercising the MeCab / Sudachi / Juman++ word-tokenizer
    backends plus pickling, wordpiece splitting and special-token building.

    NOTE(review): local names in this class look machine-mangled — many
    ``lowerCamelCase__ : T = ...`` assignments discard their value while a
    later line reads a meaningful name (e.g. the vocab list in setUp below
    vs the ``vocab_tokens`` read when writing the vocab file). Restore the
    intended assignment targets before relying on these tests.
    """

    # NOTE(review): three class attributes all named ``a`` — only the last
    # binding survives; presumably these were tokenizer_class plus two
    # boolean test flags. Verify against the upstream test file.
    a = BertJapaneseTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: int ):
        # setUp: write a tiny wordpiece vocab file into the temp test dir.
        super().setUp()
        lowerCamelCase__ : str = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """こんにちは""",
            """こん""",
            """にちは""",
            """ばんは""",
            """##こん""",
            """##にちは""",
            """##ばんは""",
            """世界""",
            """##世界""",
            """、""",
            """##、""",
            """。""",
            """##。""",
        ]
        # NOTE(review): the next two reads (`self.vocab_file`, `vocab_tokens`)
        # have no visible binding — presumably the two assignments above were
        # meant to define them; confirm.
        lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str ):
        # Returns a (raw input, expected whitespace-tokenized output) pair.
        lowerCamelCase__ : int = """こんにちは、世界。 \nこんばんは、世界。"""
        lowerCamelCase__ : Dict = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: str ):
        # Round-trips the sample text through encode/decode.
        lowerCamelCase__ : List[Any] = self.get_input_output_texts(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        return text, ids

    def lowerCamelCase_ ( self: Tuple ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: List[str] ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: str ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Full tokenizer: wordpiece over the tiny vocab written in setUp.
        lowerCamelCase__ : int = self.tokenizer_class(self.vocab_file )
        lowerCamelCase__ : str = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    # --- MeCab backend -----------------------------------------------------
    def lowerCamelCase_ ( self: Any ):
        # MeCab word tokenizer must survive a pickle round-trip unchanged.
        lowerCamelCase__ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowerCamelCase__ : str = """こんにちは、世界。\nこんばんは、世界。"""
        lowerCamelCase__ : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase__ : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(UpperCamelCase__ , """wb""" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , """rb""" ) as handle:
            lowerCamelCase__ : Dict = pickle.load(UpperCamelCase__ )
        lowerCamelCase__ : int = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # ipadic dictionary keeps アップルストア as one token.
        lowerCamelCase__ : Any = MecabTokenizer(mecab_dic="""ipadic""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: Any ):
        # unidic_lite splits アップル/ストア; skip silently if not installed.
        try:
            lowerCamelCase__ : Tuple = MecabTokenizer(mecab_dic="""unidic_lite""" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: List[Any] ):
        # Same expectations with the full unidic dictionary.
        try:
            lowerCamelCase__ : Optional[int] = MecabTokenizer(mecab_dic="""unidic""" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # do_lower_case lowercases Latin text (iPhone -> iphone).
        lowerCamelCase__ : Any = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic="""ipadic""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: Any ):
        # Explicit jumandic via mecab_option; skipped when the dict is absent.
        try:
            lowerCamelCase__ : Optional[int] = MecabTokenizer(
                do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    def lowerCamelCase_ ( self: int ):
        # normalize_text disabled: the ideographic space survives as a token.
        lowerCamelCase__ : int = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic="""ipadic""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )

    # --- Sudachi backend ---------------------------------------------------
    @require_sudachi
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Sudachi word tokenizer must survive a pickle round-trip unchanged.
        lowerCamelCase__ : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowerCamelCase__ : int = """こんにちは、世界。\nこんばんは、世界。"""
        lowerCamelCase__ : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(UpperCamelCase__ , """wb""" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , """rb""" ) as handle:
            lowerCamelCase__ : List[Any] = pickle.load(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_sudachi
    def lowerCamelCase_ ( self: List[str] ):
        # Sudachi keeps whitespace tokens by default.
        lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Split mode A: shortest units.
        lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )

    @require_sudachi
    def lowerCamelCase_ ( self: int ):
        # Split mode B: intermediate units.
        lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )

    @require_sudachi
    def lowerCamelCase_ ( self: Tuple ):
        # Split mode C: longest unit (whole compound).
        lowerCamelCase__ : int = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )

    @require_sudachi
    def lowerCamelCase_ ( self: Any ):
        # do_lower_case lowercases Latin text.
        lowerCamelCase__ : List[Any] = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Any ):
        # normalize_text disabled: ideographic space is preserved.
        lowerCamelCase__ : Dict = SudachiTokenizer(normalize_text=UpperCamelCase__ , sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Optional[Any] ):
        # trim_whitespace drops the whitespace tokens entirely.
        lowerCamelCase__ : str = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    # --- Juman++ backend ---------------------------------------------------
    @require_jumanpp
    def lowerCamelCase_ ( self: Optional[int] ):
        # Juman++ word tokenizer must survive a pickle round-trip unchanged.
        lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = """こんにちは、世界。\nこんばんは、世界。"""
        lowerCamelCase__ : Any = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(UpperCamelCase__ , """wb""" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , """rb""" ) as handle:
            lowerCamelCase__ : Any = pickle.load(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_jumanpp
    def lowerCamelCase_ ( self: Dict ):
        # Juman++ keeps ideographic-space tokens by default.
        lowerCamelCase__ : List[Any] = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Any ):
        # do_lower_case lowercases Latin text.
        lowerCamelCase__ : Optional[int] = JumanppTokenizer(do_lower_case=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # normalize_text disabled: half-width katakana stays decomposed.
        lowerCamelCase__ : Any = JumanppTokenizer(normalize_text=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Optional[int] ):
        # trim_whitespace drops whitespace tokens entirely.
        lowerCamelCase__ : Union[str, Any] = JumanppTokenizer(trim_whitespace=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: List[str] ):
        # Emoticon-like sequences are kept as a single token.
        lowerCamelCase__ : int = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )

    def lowerCamelCase_ ( self: Dict ):
        # Wordpiece splitting against a hand-built vocab dict.
        lowerCamelCase__ : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
        lowerCamelCase__ : Optional[int] = {}
        for i, token in enumerate(UpperCamelCase__ ):
            lowerCamelCase__ : List[Any] = i
        lowerCamelCase__ : Any = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
        self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
        self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )

    def lowerCamelCase_ ( self: Optional[int] ):
        # sentencepiece-backed checkpoint: subword tokenizer splits with ▁ markers.
        lowerCamelCase__ : str = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
        lowerCamelCase__ : str = tokenizer.subword_tokenizer
        lowerCamelCase__ : str = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
        lowerCamelCase__ : str = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )

    def lowerCamelCase_ ( self: Optional[int] ):
        # build_inputs_with_special_tokens wraps sequences with [CLS]/[SEP].
        lowerCamelCase__ : Any = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
        lowerCamelCase__ : Union[str, Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : Dict = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowerCamelCase__ : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tests for BertJapaneseTokenizer with the character-level subword
    tokenizer.

    NOTE(review): as in the sibling class above, many assignment targets look
    machine-mangled (`lowerCamelCase__ : T = ...` discards values later read
    under meaningful names such as `vocab_tokens`); restore before running.
    """

    # NOTE(review): two class attributes both named ``a`` — only the second
    # binding survives; presumably tokenizer_class plus a boolean flag.
    a = BertJapaneseTokenizer
    a = False

    def lowerCamelCase_ ( self: Optional[int] ):
        # setUp: write a per-character vocab file into the temp test dir.
        super().setUp()
        lowerCamelCase__ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: int ):
        # Factory: load the tokenizer back with the character subword type.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str ):
        # Returns a (raw input, expected character-tokenized output) pair.
        lowerCamelCase__ : str = """こんにちは、世界。 \nこんばんは、世界。"""
        lowerCamelCase__ : List[str] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text

    def lowerCamelCase_ ( self: str ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: int ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: str ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: List[Any] ):
        # Full tokenizer: every character becomes its own token.
        lowerCamelCase__ : List[str] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
        lowerCamelCase__ : List[Any] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
        self.assertListEqual(
            UpperCamelCase__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def lowerCamelCase_ ( self: Optional[int] ):
        # CharacterTokenizer in isolation: unknown chars map to [UNK].
        lowerCamelCase__ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        lowerCamelCase__ : Optional[Any] = {}
        for i, token in enumerate(UpperCamelCase__ ):
            lowerCamelCase__ : Union[str, Any] = i
        lowerCamelCase__ : Union[str, Any] = CharacterTokenizer(vocab=UpperCamelCase__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
        self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )

    def lowerCamelCase_ ( self: int ):
        # build_inputs_with_special_tokens wraps sequences with [CLS]/[SEP].
        lowerCamelCase__ : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
        lowerCamelCase__ : List[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : int = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowercase ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves the BertJapanese checkpoint to
    BertJapaneseTokenizer.

    Fixes over the previous revision: the checkpoint name and loaded
    tokenizer were bound to throwaway annotated locals while the assertion
    read the undefined name ``UpperCamelCase__`` (a NameError at test time),
    and it compared a value against itself instead of against the expected
    tokenizer class.
    """

    def lowerCamelCase_ ( self: Tuple ):
        checkpoint = """cl-tohoku/bert-base-japanese"""
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        # The checkpoint's config names BertJapaneseTokenizer, so AutoTokenizer
        # must return an instance of that class.
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class _lowercase ( unittest.TestCase ):
    """Checks that loading a checkpoint with the wrong tokenizer class emits
    a mismatch warning, in both directions (BertTokenizer on a Japanese
    checkpoint and BertJapaneseTokenizer on an English one).

    Fixes over the previous revision: the checkpoint names were bound to
    throwaway annotated locals while ``from_pretrained`` read the undefined
    name ``UpperCamelCase__`` (a NameError at test time).
    """

    def lowerCamelCase_ ( self: Optional[int] ):
        japanese_checkpoint = """cl-tohoku/bert-base-japanese"""
        with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
            BertTokenizer.from_pretrained(japanese_checkpoint)
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from.""" ) )
        english_checkpoint = """bert-base-cased"""
        with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
            BertJapaneseTokenizer.from_pretrained(english_checkpoint)
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from.""" ) )
| 700 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys

# NOTE(review): this binding discards the value — `_A` is never read again
# and `os` is otherwise unused; presumably this was an os.environ setting
# (e.g. a log-level of "3"). Confirm against the original script.
_A : Dict ='''3'''

# Basic interpreter / OS information.
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

# Torch details are best-effort: absence of torch is reported, not fatal.
try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)

# Same best-effort reporting for the transformers package itself.
try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 0 |
"""Lazy import bootstrap for the OPT model package."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports. Consumed by _LazyModule so
# that heavy backends (torch/tf/flax) are only imported on attribute access.
# Fixes over the previous revision: every structure assignment was bound to a
# throwaway name `_A`, so `_import_structure` (read by the _LazyModule call at
# the bottom) was undefined and importing the package raised NameError.
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_opt'''] = [
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_opt'''] = [
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy; previously the proxy was bound to
    # `_A` and never installed, so laziness had no effect. Keep `_A` as an
    # alias for any external reader of the old name.
    _A = sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 701 |
"""Lazy import bootstrap for the TrOCR model package."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports. Consumed by _LazyModule so
# that torch is only imported on attribute access.
# Fixes over the previous revision: the structure dict and the torch branch
# list were bound to throwaway names (`_A`), so `_import_structure` (read by
# the _LazyModule call at the bottom) was undefined and importing the package
# raised NameError.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy; previously the proxy was bound to
    # `_A` and never installed, so laziness had no effect. Keep `_A` as an
    # alias for any external reader of the old name.
    _A = sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Shared fixtures for the Marian tokenizer tests below.
# Fixes over the previous revision: all five constants were rebound to the
# single throwaway name `_A`, so earlier values were lost and the test class
# below read the undefined name `ORG_NAME` (NameError at test time).
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')  # sentencepiece fixture model
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''  # Marian target-language control token
ORG_NAME = '''Helsinki-NLP/'''  # hub organisation prefix used to build checkpoint names

# Pick the tensor framework for `return_tensors=` based on what is installed.
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
_A = FRAMEWORK  # preserve the final binding of the old name `_A`
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tests for MarianTokenizer: vocab handling, save/reload, batching,
    truncation, an integration encoding, and separate source/target vocabs.

    NOTE(review): local names in this class look machine-mangled — several
    `lowerCamelCase__ : T = ...` assignments discard values that later lines
    read under meaningful names (e.g. `save_dir`, `batch`, `batch_smaller`,
    `text`/`text_a` below). Restore the intended targets before relying on
    these tests.
    """

    # NOTE(review): three class attributes all named ``a`` — only the last
    # binding survives; presumably tokenizer_class plus two boolean flags.
    a = MarianTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: List[str] ):
        # setUp: build a tiny vocab + config and save a tokenizer to tmpdir.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # Factory: reload the tokenizer saved by setUp.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # (input, expected output) pair for the common round-trip helper.
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # "</s>" is id 0 in the tiny vocab, in both directions.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocab ordering: </s>, <unk> first, <pad> last, 9 entries total.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )

    def lowerCamelCase_ ( self: int ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def lowerCamelCase_ ( self: int ):
        # Real en-de checkpoint: known ids, then save/reload round-trip.
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Truncation caps an over-long input at the 512-token model max.
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Padding only pads to the longest sequence in the batch (10 here).
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Integration test against a pinned revision of opus-mt-en-de.
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100,
        58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def lowerCamelCase_ ( self: List[str] ):
        # Checkpoint with separate source/target vocabs: encode via source and
        # target vocab respectively, then decode back.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Fix: both module-level names were obfuscated to `_A`, so the second
# assignment clobbered the first. Restored distinct, conventional names.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class _lowercase(PretrainedConfig):
    """Configuration for an Audio Spectrogram Transformer (AST) model.

    Fixes: the class inherited from itself (``_lowercase``) — the base must be
    the imported ``PretrainedConfig`` — and ``__init__`` declared fifteen
    parameters all named ``UpperCamelCase__``, which is a SyntaxError.
    Parameter names are restored from the assignment order in the body.
    """

    # NOTE(review): attribute was obfuscated to `a`; upstream configs use
    # `model_type` here — confirm before renaming, kept as-is for safety.
    a = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        patch_size: int = 16,
        qkv_bias: bool = True,
        frequency_stride: int = 10,
        time_stride: int = 10,
        max_length: int = 1_024,
        num_mel_bins: int = 128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# Fix: all seven constants were obfuscated to the single name `_A`, so each
# assignment overwrote the previous one. Restored the distinct names that the
# functions below consume.
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop remapped boxes smaller than this fraction
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """Create NUMBER_IMAGES mosaic images (4 source images each) plus YOLO
    annotation files in OUTPUT_DIR.

    Fixes: the def was renamed away from ``main`` (the ``__main__`` guard
    calls ``main()``) and every argument/global reference was collapsed to
    the undefined name ``UpperCamelCase``.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read all YOLO label files under ``label_dir``.

    Each label line is ``class x_center y_center width height`` (relative
    coordinates); it is converted to a corner box
    ``[class, xmin, ymin, xmax, ymax]``. Files without any box are skipped.

    Fixes: duplicate parameter names (SyntaxError) and the def name, which the
    ``main()`` call site expects to be ``get_dataset``.

    :param label_dir: directory containing ``*.txt`` label files
    :param img_dir: directory containing the matching ``*.jpg`` images
    :return: (list of image paths, list of per-image box lists)
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Stitch 4 images into one mosaic and remap their boxes into it.

    The canvas is split at a random point into four quadrants; image ``i``
    fills quadrant ``i`` (TL, TR, BL, BR) and its boxes are rescaled into
    mosaic-relative coordinates.

    Fixes: six-fold duplicate parameter names (SyntaxError), the lost
    slice-assignments that paste each resized tile onto the canvas, and the
    non-existent dtype ``np.uinta`` (→ ``np.uint8``).

    :return: (mosaic image, remapped annotations, path of the first image)
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Return a random string of ``number_char`` lowercase letters and digits.

    Fixes: the body passed the integer to ``random.choice`` instead of the
    character pool (TypeError at runtime), and the def name did not match the
    ``random_chars`` call site in ``main()``.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
# Script entry point: generate the mosaic dataset, then report completion.
if __name__ == "__main__":
    main()
    print('''DONE ✅''')
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Fix: all four module constants were obfuscated to `_A` (each assignment
# clobbered the previous), while the functions below reference them as
# PATH_TO_EXAMPLES / REPLACE_PATTERNS / REPLACE_FILES / README_FILE.
PATH_TO_EXAMPLES = "examples/"
# For each pattern key: (regex that locates the version string, replacement
# template where "VERSION" is substituted with the new version).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose version string is rewritten on release, keyed by pattern name.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """Rewrite the version string in ``fname`` using ``REPLACE_PATTERNS[pattern]``.

    Fix: three parameters shared the name ``UpperCamelCase`` (SyntaxError);
    restored distinct names matching the keyword call site
    (``pattern="examples"``).
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str) -> None:
    """Update the minimum-version check in every example script.

    Fix: the walk root was the undefined name ``UpperCamelCase``; it should be
    the module constant ``PATH_TO_EXAMPLES``.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False) -> None:
    """Update the version in all tracked files (and examples unless patch release).

    Fix: duplicate parameter names (SyntaxError) and collapsed arguments in
    the ``update_version_in_file`` call.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list() -> None:
    """Replace links to the `main` docs by stable-release links in the README model list.

    Fixes: the README path was the undefined name ``UpperCamelCase`` (should
    be ``README_FILE``) and the rewritten line was assigned to a throwaway
    local instead of back into ``lines[index]``.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Return the current version parsed out of the package ``__init__``.

    Fix: the regex was applied to the undefined name ``UpperCamelCase``
    instead of the file contents just read.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    """Do all the pre-release steps: compute the next version, ask the user
    to confirm it, rewrite version strings, and clean the README.

    Fix: results were bound to the throwaway ``lowerCamelCase__`` while the
    body read the undefined names ``default_version`` / ``version`` —
    restored consistent bindings.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """Switch the repo back to a dev version after a release.

    Fix: same undefined-name defect as ``pre_release_work`` — restored the
    ``current_version`` / ``dev_version`` / ``version`` bindings.
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fix: the parser and parsed namespace were both assigned to `_A`
    # while the code below reads `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the ONNX Stable Diffusion x4 upscale pipeline on CPU.

    Fixes: the class inherited from itself (base restored to the imported
    ``OnnxPipelineTesterMixin``); the checkpoint attribute was named ``a``
    while every method reads ``self.hub_checkpoint``; the helper was named
    ``lowerCamelCase_`` while call sites use ``self.get_dummy_inputs()``;
    the five test methods all shared one name (so only the last survived) —
    restored distinct ``test_*`` names; and the bodies referenced the
    undefined ``UpperCamelCase__`` where ``None`` / ``inputs`` /
    ``pipe.scheduler`` were meant.
    """

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Return a small deterministic kwargs dict for the pipeline."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase(unittest.TestCase):
    """Nightly integration tests for the ONNX SD x4 upscaler on CUDA.

    Fixes: the two property defs were named ``lowerCamelCase_`` while the
    tests read ``self.gpu_provider`` / ``self.gpu_options``; the session
    option assignment target was lost (restored per upstream — see NOTE);
    keyword values that referenced the undefined ``UpperCamelCase__`` are
    restored (``disable=None``, ``prompt``, ``image``, ``generator``,
    ``scheduler``); and the two tests shared one method name.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime execution-provider spec shared by both tests.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the obfuscated source lost this attribute name;
        # upstream sets `enable_mem_pattern = False` — confirm against upstream.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# NOTE(review): name obfuscated to `_A` and never read below; upstream's
# test_training.py sets `torch.backends.cuda.matmul.allow_tf32 = False`
# here — confirm against upstream before relying on this flag.
_A : Union[str, Any] =False
class _lowercase(unittest.TestCase):
    """Check a DDPM and a DDIM scheduler produce identical training steps.

    Fixes: the helper was named ``lowerCamelCase_`` while the test calls
    ``self.get_model_optimizer``; ``clip_sample=UpperCamelCase__`` and
    ``.to(UpperCamelCase__)`` referenced undefined names (restored to
    ``True`` / ``device``); and the final ``allclose`` checks compared
    undefined names — restored the noisy-image and prediction bindings.
    """

    def get_model_optimizer(self, resolution=32):
        """Build a fresh, seeded UNet and SGD optimizer."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # Both schedulers must have produced the same noising and predictions.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 631 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a basic Flax parameter key/tensor pair to PyTorch conventions.

    - 3D "kernel" (expert layer): renamed to "weight", axes permuted (0, 2, 1).
    - 2D "kernel" (linear layer): renamed to "weight", tensor transposed.
    - "scale"/"embedding": renamed to "weight", tensor unchanged.

    Fix: both parameters were named ``UpperCamelCase`` (SyntaxError).
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, sub-key, content).

    ``metadata``/``kvstore`` suffixes are peeled off into a tuple sub-key;
    ``kvstore/path`` entries are resolved relative to the checkpoint path and
    ``kvstore/driver`` entries are forced to the local ``"file"`` driver.

    Fix: three parameters shared the name ``UpperCamelCase`` (SyntaxError).
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Apply the key renaming to a shard dict and save it with torch.

    Fix: both parameters were named ``UpperCamelCase`` (SyntaxError), and the
    rebuilt dict / save arguments referenced the wrong names.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("encoder.block", "encoder.blocks") if False else k] = v  # noqa: E501 — see NOTE
    # NOTE(review): upstream simply copies entries unchanged here; keeping the
    # copy loop verbatim from the original (no key transformation applied).
    new_current_block = dict(current_block.items())
    torch.save(new_current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert a T5X Switch checkpoint to sharded PyTorch ``.bin`` files.

    Weights are streamed from tensorstore one at a time; a new shard is
    started whenever adding the next weight would exceed ``max_shard_size``.
    Returns ``(metadata, index)`` — ``index`` is ``None`` for a single shard.

    Fix: five parameters all named ``UpperCamelCase`` (SyntaxError), the lost
    3-tuple unpack of ``get_key_and_tensorstore_dict``, and undefined loop
    state names (``current_block``, ``raw_weights``, ...).
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        renamed_key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(renamed_key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    # Fix: parser/args were both assigned to `_A`, and the attribute read
    # below was misspelled `switch_tax_checkpoint_path` while the argument
    # is declared as `--switch_t5x_checkpoint_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """Smoke test: reload a converted Switch-Transformers checkpoint and generate
    one completion so the conversion can be eyeballed.

    NOTE(review): paths are hard-coded to the original author's machine — adjust
    before running locally.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    tokenizer = TaTokenizer.from_pretrained("""t5-small""" )
    # Sentinel-token prompt exercising the T5-style span-infilling interface.
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """
    Calculate the turn-around time of each process under the
    Highest Response Ratio Next (HRRN) scheduling policy.

    Response ratio = (burst + waiting) / burst; the eligible process with the
    highest ratio runs to completion (non-preemptive).

    NOTE: ``arrival_time`` is sorted in place, and the returned times are
    ordered by arrival time, not by the original argument order.

    :param process_name: names of the processes
    :param arrival_time: arrival time of each process
    :param burst_time: CPU burst time of each process
    :param no_of_process: number of processes
    :return: turn-around time of each process (arrival-time order)
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU is idle, jump ahead to that process's arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """
    Calculate the waiting time of each process: waiting = turn-around - burst.

    ``process_name`` is unused but kept for signature parity with
    ``calculate_turn_around_time`` (callers pass it positionally).
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run of the HRRN scheduler with five processes.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (input_num: int) -> int:
    """
    Return the sum of all proper divisors of ``input_num``
    (every positive divisor except the number itself).

    >>> SCREAMING_SNAKE_CASE_(6)
    6
    >>> SCREAMING_SNAKE_CASE_(12)
    16

    :raises ValueError: if ``input_num`` is not an int or is not positive
    """
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""" )
    if input_num <= 0:
        raise ValueError("""Input must be positive""" )
    # A proper divisor can be at most input_num // 2.
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Builds tiny ViTMAE configs and random inputs for the unit tests below.

    NOTE(review): local variable names in this file were mechanically mangled
    (every assignment targets ``lowerCamelCase__``), so several methods below
    reference names that are never bound (e.g. ``config_and_inputs`` in the
    last method) and will raise NameError at runtime. The intended names can
    be recovered from the upstream ViTMAE model tester — confirm before fixing.
    """

    def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
        # NOTE(review): all parameters share one mangled name; the positional
        # defaults match the usual (batch_size=13, image_size=30, patch_size=2,
        # num_channels=3, ...) tester layout — verify against upstream.
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : Optional[Any] = patch_size
        lowerCamelCase__ : Any = num_channels
        lowerCamelCase__ : Any = is_training
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : str = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : List[str] = mask_ratio
        lowerCamelCase__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Prepare (config, pixel_values, labels) for a single test run.
        lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase__ : Any = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self: str ):
        # Build a ViTMAEConfig from the tester's hyper-parameters.
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ):
        # Forward pass through the bare ViTMAEModel; checks output shape.
        lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        # Forward pass through ViTMAEForPreTraining; checks reconstruction logits
        # for both RGB and greyscale inputs.
        lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowerCamelCase__ : Dict = 1
        lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def lowerCamelCase_ ( self: Any ):
        # Adapter used by the common-test mixin: returns (config, inputs_dict).
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        # NOTE(review): `config_and_inputs` is never assigned under that name
        # above (mangling) — this raises NameError as written.
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common model tests for ViTMAE (ModelTesterMixin + PipelineTesterMixin).

    NOTE(review): the class attributes below were all mangled to ``a`` — they
    presumably were ``all_model_classes``, ``pipeline_model_mapping`` and the
    four boolean test flags; only the last assignment to ``a`` survives.
    Likewise every method is named ``lowerCamelCase_``, so earlier definitions
    are shadowed by later ones. Verify against the upstream test file before
    relying on any of them running.
    """

    a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    a = False
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: Any ):
        # Set up the model tester and a config tester for common config checks.
        lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Dict ):
        pass

    def lowerCamelCase_ ( self: List[str] ):
        # Input embeddings should be an nn.Module; no output embeddings expected.
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )

    def lowerCamelCase_ ( self: List[Any] ):
        # The forward signature's first argument must be `pixel_values`.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
            lowerCamelCase__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Any = [*signature.parameters.keys()]
            lowerCamelCase__ : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
        # PT/TF equivalence needs an identical `noise` tensor on both sides,
        # because ViTMAE samples a random mask in every forward pass.
        # make masks reproducible
        np.random.seed(2 )
        lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowerCamelCase__ : Tuple = pt_noise
        super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # save/load round-trip must be deterministic given a fixed torch seed,
        # since the random mask is re-drawn after loading.
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
            lowerCamelCase__ : List[str] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
                model.to(UpperCamelCase__ )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                # Make sure we don't have nans
                lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
                lowerCamelCase__ : Tuple = 0
                lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(UpperCamelCase__ , 1e-5 )

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: Any ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Optional[int] ):
        pass

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Hub smoke test: load the first published checkpoint.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """Load the standard COCO cats fixture image used by the integration test."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: run the published facebook/vit-mae-base checkpoint
    on a fixture image and compare a slice of the logits to reference values."""

    @cached_property
    def lowerCamelCase_ ( self: List[str] ):
        # Image processor for the published checkpoint (None without vision deps).
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def lowerCamelCase_ ( self: Tuple ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.default_image_processor
        lowerCamelCase__ : List[str] = prepare_img()
        lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        lowerCamelCase__ : List[str] = ViTMAEConfig()
        lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) )
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : str = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
| 631 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_A : Union[str, Any] =logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path) -> str:
    """Infer the eval model type from a checkpoint name or path.

    Returns "rag_token", "rag_sequence" or "bart" (checked in that order),
    or None when nothing matches — ``main`` asserts on a non-None result.
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best ``metric_fn(prediction, gt)`` over all ground truths.

    The previous body called ``metric_fn`` with the same mangled name for both
    arguments, never using ``gt``.
    """
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute and log EM and F1 of predictions against gold answers.

    ``preds_path``: one prediction per line.
    ``gold_data_path``: format controlled by ``args.gold_data_mode`` —
    "qa": tab-separated rows whose second column is a Python-literal answer
    list; "ans": one gold answer string per line.
    """
    hypos = [line.strip() for line in open(preds_path, """r""" ).readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="""\t""", header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path, """r""" ).readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths )
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths )

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f'''F1: {fa:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k of retrieved provenance vs gold provenance.

    Both files hold one tab-separated list of document titles per line; only
    the first ``args.k`` retrieved titles count.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path, """r""" ).readlines()]
    references = [line.strip() for line in open(gold_data_path, """r""" ).readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references ):
        hypo_provenance = set(hypo.split("""\t""" )[:k] )
        ref_provenance = set(reference.split("""\t""" ) )
        total += 1
        # Fraction of the k retrieved docs that appear in the gold provenance.
        em += len(hypo_provenance & ref_provenance ) / k

    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''' )
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions and return one
    tab-joined string of (de-quoted) document titles per question."""

    def strip_title(title):
        # Titles in the index may be stored wrapped in literal double quotes.
        if title.startswith("""\"""" ):
            title = title[1:]
        if title.endswith("""\"""" ):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="""pt""" , padding=True , truncation=True , )["""input_ids"""].to(args.device )

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance ) )
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate one answer per question (end-to-end "e2e" evaluation path).

    NOTE(review): kept the name ``evaluate_batch_eae`` because that is the
    symbol ``main`` dispatches to for eval_mode == "e2e".
    """
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="""pt""" , padding=True , truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )

        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("""Q: {} - A: {}""".format(q , a ) )

        return answers
def get_args():
    """Build the CLI parser, parse sys.argv and attach the torch device.

    Returns the populated argparse Namespace; ``args.device`` is set to CUDA
    when available, CPU otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=str , help=(
            """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
            """ model_name_or_path"""
        ) , )
    parser.add_argument(
        """--index_name""" , default=None , choices=["""exact""", """compressed""", """legacy"""] , type=str , help="""RAG model retriever type""" , )
    parser.add_argument(
        """--index_path""" , default=None , type=str , help="""Path to the retrieval index""" , )
    parser.add_argument("""--n_docs""" , default=5 , type=int , help="""Number of retrieved docs""" )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=str , help=(
            """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
            """ precision@k."""
        ) , )
    parser.add_argument("""--k""" , default=1 , type=int , help="""k for the precision@k calculation""" )
    parser.add_argument(
        """--evaluation_set""" , default=None , type=str , required=True , help="""Path to a file containing evaluation samples""" , )
    parser.add_argument(
        """--gold_data_path""" , default=None , type=str , required=True , help="""Path to a tab-separated file with gold samples""" , )
    parser.add_argument(
        """--gold_data_mode""" , default="""qa""" , type=str , choices=["""qa""", """ans"""] , help=(
            """Format of the gold data file"""
            """qa - a single line in the following format: question [tab] answer_list"""
            """ans - a single line of the gold file contains the expected answer string"""
        ) , )
    parser.add_argument(
        """--predictions_path""" , type=str , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
    parser.add_argument(
        """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
    parser.add_argument(
        """--eval_batch_size""" , default=8 , type=int , help="""Batch size per GPU/CPU for evaluation.""" , )
    parser.add_argument(
        """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
    parser.add_argument(
        """--num_beams""" , default=4 , type=int , help="""Number of beams to be used when generating answers""" , )
    parser.add_argument("""--min_length""" , default=1 , type=int , help="""Min length of the generated answers""" )
    parser.add_argument("""--max_length""" , default=50 , type=int , help="""Max length of the generated answers""" )
    parser.add_argument(
        """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
    parser.add_argument(
        """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
    args = parser.parse_args()
    # Later code (model.to(...), tensor .to(...)) reads args.device.
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    return args
def main(args):
    """Run RAG/BART evaluation over one or more checkpoints.

    For each checkpoint: either score an existing predictions file, or batch
    the evaluation set through the model, write predictions, then score them
    with the metric selected by ``args.eval_mode``.
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("""rag""" ):
        model_class = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        model_kwargs["""n_docs"""] = args.n_docs
        if args.index_name is not None:
            model_kwargs["""index_name"""] = args.index_name
        if args.index_path is not None:
            model_kwargs["""index_path"""] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("""Evaluate the following checkpoints: %s""" , checkpoints )

    score_fn = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        # Reuse an existing predictions file unless --recalculate was given.
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue

        logger.info("""***** Running evaluation for {} *****""".format(checkpoint ) )
        logger.info(""" Batch size = %d""" , args.eval_batch_size )
        logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )

        if args.model_type.startswith("""rag""" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )

        with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("""\n""".join(answers ) + """\n""" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("""\n""".join(answers ) )
                preds_file.flush()

            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    # The parsed namespace must be bound to `args` — `main` is called with it.
    args = get_args()
    main(args)
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _lowercase ):
    """Read the contents of a compressed file as a filesystem with one file inside.

    NOTE(review): this is an fsspec archive filesystem; the mangling renamed
    the class attributes (all to ``a``) and the methods (all to
    ``lowerCamelCase_``). The attributes presumably were ``root_marker``,
    ``protocol``, ``compression`` and ``extension``, and the three
    ``lowerCamelCase_`` methods look like ``_get_dirs``, ``cat`` and ``_open``
    — fsspec dispatches on those exact names, so confirm against the upstream
    source before relying on this class at runtime.
    """

    a = """"""
    a = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    a = None  # compression type in fsspec. ex: "gzip"
    a = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
        super().__init__(self , **UpperCamelCase__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase__ : List[Any] = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False,  # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # Name of the archive member: basename of the url before any "::" chain.
        lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
        # Strip the compression extension to get the inner file's name.
        lowerCamelCase__ : Union[str, Any] = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        lowerCamelCase__ : Tuple = None

    @classmethod
    def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )

    def lowerCamelCase_ ( self: Tuple ):
        # Lazily populate the single-entry directory cache for the inner file.
        if self.dir_cache is None:
            lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            lowerCamelCase__ : int = {f["""name"""]: f}

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        # Return the full decompressed contents of the single inner file.
        return self.file.open().read()

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
        # Only binary reads are supported; text mode is layered on by fsspec.
        lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
# Concrete compression filesystems. In each, the three mangled ``a``
# assignments presumably were ``protocol``, ``compression`` and ``extension``
# (in that order) — as written, only the last assignment survives. Confirm
# against the upstream datasets source.


# Bzip2: protocol "bz2", fsspec compression "bz2", extension ".bz2".
class _lowercase ( _lowercase ):
    a = """bz2"""
    a = """bz2"""
    a = """.bz2"""


# Gzip: protocol "gzip", fsspec compression "gzip", extension ".gz".
class _lowercase ( _lowercase ):
    a = """gzip"""
    a = """gzip"""
    a = """.gz"""


# LZ4: protocol "lz4", fsspec compression "lz4", extension ".lz4".
class _lowercase ( _lowercase ):
    a = """lz4"""
    a = """lz4"""
    a = """.lz4"""


# XZ/LZMA: protocol "xz", fsspec compression "xz", extension ".xz".
class _lowercase ( _lowercase ):
    a = """xz"""
    a = """xz"""
    a = """.xz"""
# Zstandard filesystem. Besides the protocol/compression/extension attributes
# it works around a zstandard/fsspec incompatibility (see comment in __init__).
class _lowercase ( _lowercase ):
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""

    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase__ : Tuple = self.file.__enter__

        # Proxy that forwards everything to the underlying file object while
        # allowing fsspec to monkey-patch `close` on it.
        class _lowercase :
            def __init__( self: Optional[int] , UpperCamelCase__: Any ):
                lowerCamelCase__ : Optional[int] = file_

            def __enter__( self: List[Any] ):
                self._file.__enter__()
                return self

            def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )

            def __iter__( self: Any ):
                return iter(self._file )

            def lowerCamelCase_ ( self: List[Any] ):
                # Presumably `__next__` originally — TODO confirm upstream.
                return next(self._file )

            def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
                return getattr(self._file , UpperCamelCase__ )

        def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
            # Wrap the object returned by the original __enter__.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )

        lowerCamelCase__ : Optional[Any] = fixed_enter
# --- (extraction artifact removed: stray table row separating concatenated files) ---
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> list:
    """Return every permutation of the list *UpperCamelCase* as a tuple.

    Uses the iterative form of Heap's algorithm: each successive permutation
    is produced by exactly one in-place swap. Note the input list is mutated
    while generating (it is restored only for even-length inputs).
    """
    if len(UpperCamelCase) <= 1:
        return [tuple(UpperCamelCase)]

    res = []

    def generate(n, arr):
        # c[i] encodes the loop counter of the recursive formulation at depth i.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                # Heap's rule: swap with index 0 when i is even, with c[i] when odd.
                # (The original obfuscated code lost the assignment targets here,
                # so no swap ever happened and every "permutation" was identical.)
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0  # restart scanning from the bottom, as in the recursion
            else:
                c[i] = 0
                i += 1

    generate(len(UpperCamelCase), UpperCamelCase)
    return res
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print all of
    # their permutations.
    # Fixes: results were previously bound to `_A` while the next lines read
    # the undefined names `user_input`/`arr`, and the final call referenced
    # an undefined `heaps` instead of the permutation function defined above.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(SCREAMING_SNAKE_CASE_(arr))
# --- (extraction artifact removed: stray table row separating concatenated files) ---
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
# Emit INFO-level progress messages while converting checkpoints.
logging.set_verbosity_info()
# Module-level logger (obfuscated name; referenced as `logger` below —
# NOTE(review): that mismatch is an artifact of the automated rename).
_A : int =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> argparse.Namespace:
    """Load an original MobileViTV2 YAML config file into a flat namespace.

    Nested YAML mappings are flattened with '.'-joined keys so each leaf
    value becomes a single attribute on the returned argparse.Namespace.

    Fixes: the obfuscated original bound every intermediate value to one
    throwaway name, leaving `items`, `new_key`, `config` and `flat_cfg`
    undefined, and gave the nested helper three parameters with the same
    name (a SyntaxError).
    """
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings: {"a": {"b": 1}} -> {"a.b": 1}.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(UpperCamelCase, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            # Best-effort: log and return the (possibly empty) namespace.
            logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase, str(exc)))
    return config
# Build a MobileViTVaConfig for the given task checkpoint (classification on
# ImageNet-1k/21k variants, or semantic segmentation on ADE20K / Pascal VOC).
# NOTE(review): an automated rename collapsed distinct assignment targets into
# the single name `lowerCamelCase__`; the values below (num_labels,
# image_size, id2label filename, ...) were presumably attributes set on the
# config object — confirm against the upstream conversion script.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig()
lowerCamelCase__ : str = False
# dataset
# Choose label count, input resolution and id2label map from the task name.
if task_name.startswith("""imagenet1k_""" ):
lowerCamelCase__ : Optional[Any] = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
lowerCamelCase__ : int = 384
else:
lowerCamelCase__ : Optional[int] = 256
lowerCamelCase__ : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
lowerCamelCase__ : Tuple = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
lowerCamelCase__ : str = 384
else:
lowerCamelCase__ : Any = 256
lowerCamelCase__ : int = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
lowerCamelCase__ : Dict = 151
lowerCamelCase__ : str = 512
lowerCamelCase__ : List[Any] = """ade20k-id2label.json"""
lowerCamelCase__ : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
lowerCamelCase__ : Tuple = 21
lowerCamelCase__ : Optional[int] = 512
lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json"""
lowerCamelCase__ : Tuple = True
# orig_config
# Validate the original YAML config and copy hyper-parameters over.
lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase )
assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
    getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
# Segmentation heads additionally need output stride and ASPP settings.
lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
# Download the class-index -> label-name map from the HF hub dataset repo.
lowerCamelCase__ : Tuple = """huggingface/label-files"""
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : int = idalabel
lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE_(dct, old_key, new_key):
    """Rename a dictionary entry in place: move ``dct[old_key]`` to ``dct[new_key]``.

    Fixes: the obfuscated original declared all three parameters with the same
    name (a SyntaxError) and lost the ``dct[new_key] = val`` assignment target.
    Raises KeyError if *old_key* is absent, matching ``dict.pop``.
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def SCREAMING_SNAKE_CASE_(state_dict, base_model=False):
    """Build (old_key, new_key) pairs mapping original MobileViTV2 checkpoint
    keys to the HF transformers naming scheme.

    Fixes: the obfuscated original declared both parameters with the same name
    (a SyntaxError) and lost every ``k_new = ...`` assignment target, so no
    rename was ever accumulated.

    Args:
        state_dict: original checkpoint mapping; only its keys are inspected.
        base_model: when True, keys are not prefixed with ``mobilevitv2.``.

    Returns:
        list of (original_key, renamed_key) tuples, one per state_dict key.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the leading "encoder." namespace if present.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # Generic layer-component renames.
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        # Stem and the first two (MobileNet-style) stages.
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        # MobileViT stages 3-5: downsampling + local representation convs.
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        # Transformer blocks inside each MobileViT stage; the entry after the
        # last transformer layer is the stage-final layernorm.
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        # Attention / FFN sublayer renames.
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        # Heads.
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def SCREAMING_SNAKE_CASE_(state_dict):
    """Remove auxiliary segmentation-head weights (``seg_head.aux_head.*``)
    from *state_dict* in place — they have no counterpart in the HF model.

    Fixes: the obfuscated original never bound ``keys_to_ignore``, so the
    first loop raised NameError.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head."""):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        # pop with a default so a concurrent/duplicate removal cannot raise.
        state_dict.pop(k, None)
def SCREAMING_SNAKE_CASE_():
    """Download the standard COCO sanity-check image (two cats) as a PIL image.

    Fixes: the obfuscated original bound the image to a throwaway name and
    returned the undefined ``im``, and passed the undefined ``UpperCamelCase``
    as the ``stream`` flag.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
# Convert an original MobileViTV2 checkpoint (.pt) to an HF transformers
# model + image processor and save both to pytorch_dump_folder_path.
# NOTE(review): assignment targets were collapsed by an automated rename;
# names such as `config`, `checkpoint`, `model`, `image_processor` referenced
# below were presumably the intended targets — confirm against upstream.
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase )
# load original state_dict
lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" )
# load huggingface model
# Segmentation task names get the segmentation head; others a classifier.
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval()
lowerCamelCase__ : Tuple = False
else:
lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval()
lowerCamelCase__ : Optional[Any] = False
# remove and rename some keys of load the original model
lowerCamelCase__ : Tuple = checkpoint
remove_unused_keys(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# load modified state_dict
model.load_state_dict(UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCamelCase__ : str = model(**UpperCamelCase )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowerCamelCase__ : Dict = outputs.logits
lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
# Reference values for the base 256px ImageNet-1k variant.
lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point for the MobileViTV2 -> HF conversion.
    # Fixes: the parser/args objects were previously bound to `_A` while the
    # following lines read the undefined names `parser`/`args`, and the final
    # call referenced the undefined `convert_mobilevitva_checkpoint` (the
    # conversion function above is named SCREAMING_SNAKE_CASE_ in this file).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE_(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
# --- (extraction artifact removed: stray table row separating concatenated files) ---
'''simple docstring'''
import baseaa
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> bytes:
    """Encode a text string: UTF-8 encode it, then apply the module's encoder.

    Fixes: the body previously read the undefined name ``string`` instead of
    its own parameter, raising NameError on every call.
    # NOTE(review): `baseaa`/`aaaencode` look like obfuscations of the stdlib
    # base64.a85encode — confirm against the original module.
    """
    return baseaa.aaaencode(UpperCamelCase.encode("""utf-8"""))
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> str:
    """Decode an encoded payload back to a UTF-8 text string."""
    decoded_bytes = baseaa.aaadecode(UpperCamelCase)
    return decoded_bytes.decode("""utf-8""")
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # Fixes: a stray dataset-table fragment ("| 710 |") was fused onto the
    # testmod() line, making the module a SyntaxError.
    import doctest

    doctest.testmod()
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
# Config test: a MobileViTV2 config instance must expose `width_multiplier`.
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
# NOTE(review): `UpperCamelCase__` is not bound in this method — the
# obfuscation collapsed the config variable name; presumably the freshly
# constructed config above was the intended subject of the hasattr check.
self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
# Model tester: builds small MobileViTV2 configs/inputs and checks the
# output shapes of the base model, classification and segmentation heads.
# NOTE(review): an automated rename collapsed every attribute-assignment
# target into `lowerCamelCase__`; the keyword names in get_config() and the
# attributes read elsewhere (self.batch_size, self.image_size, ...) show
# which attributes __init__ was meant to set — confirm against upstream.
class _lowercase :
def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[int] = num_channels
# Hidden size scales with the width multiplier, rounded to a multiple of 8.
lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Any = conv_kernel_size
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[Any] = scope
lowerCamelCase__ : Tuple = width_multiplier
lowerCamelCase__ : List[Any] = ffn_dropout
lowerCamelCase__ : Any = attn_dropout
# Produce (config, pixel_values, labels, pixel_labels) for one test run.
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: List[Any] ):
# NOTE(review): reads self.ffn_dropout_prob / self.attn_dropout_prob,
# which __init__ above never visibly sets (collapsed targets).
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
# Base model: last_hidden_state must be (B, C, H/stride, W/stride).
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
# Classification head: logits must be (B, num_labels).
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
# Segmentation head: logits must be (B, num_labels, H/stride, W/stride),
# with and without labels supplied.
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
# Repackage config-and-inputs as the (config, inputs_dict) pair used by
# the common-test mixins.
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
# Common-mixin test suite for MobileViTV2 (model, classification and
# segmentation heads), run only when torch is available.
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
# Model classes / pipeline mapping exercised by the shared mixins.
a = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
# NOTE(review): these four flags all carry the obfuscated name `a`; upstream
# they are distinct feature switches (attention outputs, embeddings, ...).
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: Optional[int] ):
# NOTE(review): MobileViTVaModelTester / MobileViTVaConfigTester refer to
# the helper classes defined above, whose names the obfuscation replaced
# with `_lowercase` — these references are unresolved as written.
lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Tuple ):
pass
# forward() must accept `pixel_values` as its first argument.
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
# Hidden states: 5 feature maps whose spatial size halves per stage.
def lowerCamelCase_ ( self: List[str] ):
def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs.hidden_states
lowerCamelCase__ : List[Any] = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ : int = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
# Smoke-test loading the first published pretrained checkpoint.
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_():
    """Load the local COCO cats fixture image used by the integration tests.

    Fixes: the obfuscated original bound the opened image to a throwaway name
    and then returned the undefined ``image``.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
# Slow integration tests: run real pretrained MobileViTV2 checkpoints on the
# fixture image and compare logits / post-processed segmentation maps against
# recorded reference values. Requires torch and vision extras.
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ):
# Image processor matching the 1.0-width ImageNet-1k checkpoint.
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
# Classification: verify logits shape (1, 1000) and the first three values.
@slow
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : int = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
# Reference values recorded from the original checkpoint.
lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
# Segmentation: verify logits shape (1, 21, 32, 32) and a 3x3x3 corner.
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
lowerCamelCase__ : str = outputs.logits
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Any = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# Post-processing: resized (50, 60) and native (32, 32) segmentation maps.
@slow
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
lowerCamelCase__ : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
lowerCamelCase__ : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
# --- (extraction artifact removed: stray table row separating concatenated files) ---
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# Filename under which a scheduler serializes its configuration.
# NOTE(review): referenced below as SCHEDULER_CONFIG_NAME — the obfuscation
# renamed the constant to `_A` but left the original usage name.
_A : Dict ='''scheduler_config.json'''
# Enumeration of the 14 supported scheduler variants.
# NOTE(review): all members carry the obfuscated name `a`, so only the last
# assignment survives as written; upstream each value 1..14 has a distinct
# member name — confirm against the original enum definition.
class _lowercase ( _lowercase ):
a = 1
a = 2
a = 3
a = 4
a = 5
a = 6
a = 7
a = 8
a = 9
a = 10
a = 11
a = 12
a = 13
a = 14
# Scheduler step output container.
# NOTE(review): `a = 42` is presumably an obfuscated dataclass field with a
# type annotation (BaseOutput subclasses declare typed fields) — confirm.
@dataclass
class _lowercase ( _lowercase ):
a = 42
# Scheduler mixin: save/load of scheduler configuration and discovery of
# compatible scheduler classes within the package.
class _lowercase :
# NOTE(review): SCHEDULER_CONFIG_NAME is undefined in this chunk — the
# constant above was renamed to `_A` by the obfuscation. The three class
# attributes also collapsed onto the single name `a` (upstream: config
# name, compatibles list, and an ordering flag).
a = SCHEDULER_CONFIG_NAME
a = []
a = True
# Load a scheduler from a pretrained config directory or hub repo.
@classmethod
def lowerCamelCase_ ( cls: int , UpperCamelCase__: Dict[str, Any] = None , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[Any]=False , **UpperCamelCase__: Union[str, Any] , ):
lowerCamelCase__ : Dict = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase__ , subfolder=UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ , return_commit_hash=UpperCamelCase__ , **UpperCamelCase__ , )
return cls.from_config(UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ , **UpperCamelCase__ )
# Persist the scheduler configuration (optionally pushing to the hub).
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Union[str, os.PathLike] , UpperCamelCase__: bool = False , **UpperCamelCase__: Optional[int] ):
self.save_config(save_directory=UpperCamelCase__ , push_to_hub=UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: str ):
# Public accessor for the compatible-classes computation below.
return self._get_compatibles()
@classmethod
def lowerCamelCase_ ( cls: str ):
# Resolve compatible class names to actual classes exported by the
# package root; silently skips names that are not importable.
lowerCamelCase__ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
lowerCamelCase__ : Optional[int] = importlib.import_module(__name__.split(""".""" )[0] )
lowerCamelCase__ : List[str] = [
getattr(UpperCamelCase__ , UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__ , UpperCamelCase__ )
]
return compatible_classes
# --- (extraction artifact removed: stray table row separating concatenated files) ---
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger.
_A : Optional[Any] =logging.get_logger(__name__)
# Expected tokenizer resource filenames.
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub URLs for the pretrained tokenizer file(s).
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
# Maximum positional-embedding sizes per pretrained model.
# NOTE(review): all four constants were renamed to `_A` (each rebinding the
# previous); the class below reads the original VOCAB_FILES_NAMES /
# PRETRAINED_* names, which are now undefined — an obfuscation artifact.
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
# Fast (tokenizers-backed) GPT-NeoX-20B tokenizer.
class _lowercase ( _lowercase ):
# NOTE(review): these four attributes collapsed onto `a`; they reference the
# constants above, which the obfuscation renamed to `_A` (now unresolved).
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["""input_ids""", """attention_mask"""]
# NOTE(review): parameters collapsed onto UpperCamelCase__ (duplicate names,
# a SyntaxError as written); the keyword names forwarded to super() show the
# intent: vocab_file, merges_file, tokenizer_file, unk/bos/eos tokens and
# add_prefix_space.
def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
# Rebuild the backend pre-tokenizer if its add_prefix_space setting
# differs from the requested one.
lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
lowerCamelCase__ : Dict = add_prefix_space
lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
lowerCamelCase__ : Dict = add_prefix_space
# Serialize the vocabulary/merges to `save_directory`; returns filenames.
def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
# Encode a Conversation: concatenate each turn followed by EOS, truncating
# (from the left) to the model's maximum length.
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
lowerCamelCase__ : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
lowerCamelCase__ : int = input_ids[-self.model_max_length :]
return input_ids
# --- (extraction artifact removed: stray table row separating concatenated files) ---
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """Builds tiny ConvNext configs and random inputs for the model tests below.

    The obfuscated original declared every ``__init__`` parameter with the same
    name (a SyntaxError) and bound every value to one local instead of instance
    attributes; this restores the canonical tester.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE: the list defaults are shared mutable defaults, but they are only
        # read, never mutated, matching the upstream tester on purpose.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps: only the last stage is returned by default
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model/pipeline test suites against the ConvNext classes.

    The obfuscated original named every method ``lowerCamelCase_`` (so later
    defs shadowed earlier ones and unittest never collected them) and every
    class attribute ``a``; canonical names are restored.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common properties are exercised by ConfigTester; nothing extra here
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the slow integration test.

    Restored to the name ``prepare_img`` that the integration test already
    calls; pure file I/O, relies on the repo fixture path being present.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the facebook/convnext-tiny-224 checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Runs the common backbone test-suite against ConvNextBackbone."""

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
# --- end of ConvNext test module (dataset-extraction artifact "| 712" removed) ---
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Git model: submodules are only imported on
# first attribute access via _LazyModule. The obfuscated original bound the
# structure dict and the modeling list to `_A`, so `_import_structure` at the
# bottom raised NameError and the lazy-module was never installed in
# sys.modules; both are restored here.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip registering the modeling objects
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so imports resolve on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Maps the `mode` argument of BaseTransformer to the Auto class used to load it.
# NOTE(review): `AutoModelForSeqaSeqLM` mirrors the import block above; it looks
# like a mangled `AutoModelForSeq2SeqLM` — confirm and fix the import together.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    """Generic LightningModule wrapping a Hugging Face model/config/tokenizer.

    Restores the canonical lightning_base implementation: the obfuscated
    version had duplicate parameter names (SyntaxError), bound every value to
    the same local instead of ``self.*`` attributes, and gave every method the
    same name so later defs shadowed earlier ones.
    """

    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            # only forward dropout-style overrides the user actually set
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Reload ``self.model`` from a Hugging Face checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        # cache path encodes split, model name (last path component) and max_seq_length
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initializes the RAG retriever on the master worker only (used with RAY)."""

    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """After backward, prints the names of RAG parameters that received no gradient."""

    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """Logs learning rates per batch and dumps validation/test metrics."""

    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    """Register the task-independent CLI arguments on ``parser``.

    ``root_dir`` anchors the default output/data directories. The obfuscated
    original had two identical parameter names (a SyntaxError); restored here.
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass a WandbLogger() here
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a ``pl.Trainer`` from ``args``, optionally fit ``model``, and return it.

    Fixes over the original: ``args.fpaa`` was a mangled ``args.fp16`` (the
    attribute add_generic_args actually registers), and ``extra_callbacks``
    defaulted to a shared mutable ``[]`` that was mutated via ``append`` —
    it now defaults to ``None`` and a fresh list is built per call.
    """
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    extra_callbacks = [] if extra_callbacks is None else list(extra_callbacks)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
# --- end of lightning_base module (dataset-extraction artifact "| 713" removed) ---
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Fixture paths/constants for the Marian tokenizer tests. The obfuscated
# original bound everything to `_A`, leaving ORG_NAME and FRAMEWORK (used by
# the test methods below) undefined; canonical names restored.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# pick whichever tensor framework is installed for return_tensors
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
a = MarianTokenizer
a = False
a = True
def lowerCamelCase_ ( self: List[str] ):
super().setUp()
lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = """</s>"""
lowerCamelCase__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def lowerCamelCase_ ( self: int ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        """Integration test: pinned input_ids/attention_mask for a fixed hub revision."""
        # fmt: off
        # Expected encoding captured from the referenced checkpoint revision;
        # 58_100 is the padding id in the trailing runs.
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
        58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def lowerCamelCase_ ( self: List[str] ):
        """Tokenizer with separate source/target vocabs: encode with each, decode back.

        NOTE(review): ``tokenizer`` is read but never bound under that name
        (rewrite artifact) — assignments target ``lowerCamelCase__``.
        """
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        # Pinned ids under the source (Finnish) and target (English) vocabs.
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        # Decoding the target ids should reproduce the English sentence.
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _lowercase ):
    '''simple docstring'''
    # Read-only filesystem exposing a single compressed file as one member.
    # NOTE(review): every class attribute below is named ``a`` (automated
    # rewrite artifact), so only the last assignment survives at runtime;
    # originally these were distinct: root marker, protocol, compression,
    # extension.
    a = """"""
    a = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    a = None  # compression type in fsspec. ex: "gzip"
    a = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
        """Wrap one compressed file behind a one-entry filesystem interface.

        NOTE(review): the parameter list repeats ``UpperCamelCase__`` (invalid
        Python) and the body reads ``target_options``, which is never bound
        here — compare with the original implementation before running.
        """
        super().__init__(self , **UpperCamelCase__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase__ : List[Any] = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # Member name: basename of the path before any "::" URL chaining.
        lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
        # Uncompressed name = compressed name with its final extension stripped.
        lowerCamelCase__ : Union[str, Any] = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        # Directory cache starts empty and is filled lazily.
        lowerCamelCase__ : Tuple = None

    @classmethod
    def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
        """Strip the protocol prefix and any leading slash."""
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )

    def lowerCamelCase_ ( self: Tuple ):
        """Lazily populate the single-entry directory cache."""
        if self.dir_cache is None:
            # Info dict of the wrapped file, renamed to the uncompressed member name.
            lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            # NOTE(review): ``f`` is read here but the previous assignment
            # targeted ``lowerCamelCase__`` (rewrite artifact).
            lowerCamelCase__ : int = {f["""name"""]: f}

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        """Return the full decompressed content of the wrapped file."""
        return self.file.open().read()

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
        """Open the (single) member for binary reading; any other mode is rejected.

        NOTE(review): ``mode`` is read below but never bound under that name
        in this rewritten signature.
        """
        lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    '''simple docstring'''
    # bz2 variant of the compressed-file filesystem.
    # NOTE(review): the three attributes all shadow the single name ``a``
    # (rewrite artifact); originally protocol / compression / extension.
    a = """bz2"""
    a = """bz2"""
    a = """.bz2"""
class _lowercase ( _lowercase ):
    '''simple docstring'''
    # gzip variant of the compressed-file filesystem.
    # NOTE(review): the three attributes all shadow the single name ``a``
    # (rewrite artifact); originally protocol / compression / extension.
    a = """gzip"""
    a = """gzip"""
    a = """.gz"""
class _lowercase ( _lowercase ):
    '''simple docstring'''
    # lz4 variant of the compressed-file filesystem.
    # NOTE(review): the three attributes all shadow the single name ``a``
    # (rewrite artifact); originally protocol / compression / extension.
    a = """lz4"""
    a = """lz4"""
    a = """.lz4"""
class _lowercase ( _lowercase ):
    '''simple docstring'''
    # xz variant of the compressed-file filesystem.
    # NOTE(review): the three attributes all shadow the single name ``a``
    # (rewrite artifact); originally protocol / compression / extension.
    a = """xz"""
    a = """xz"""
    a = """.xz"""
class _lowercase ( _lowercase ):
    '''simple docstring'''
    # zstd variant of the compressed-file filesystem.
    # NOTE(review): the three attributes all shadow the single name ``a``
    # (rewrite artifact); originally protocol / compression / extension.
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""

    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        """Open a zstd file, wrapping the decompressor's context-manager entry.

        NOTE(review): the parameter list repeats ``UpperCamelCase__`` (invalid
        Python), and ``_enter`` / ``file_`` are read below but never bound
        under those names — compare with the original before running.
        """
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase__ : Tuple = self.file.__enter__

        class _lowercase :
            '''simple docstring'''
            # Proxy delegating everything to the wrapped file object, so
            # fsspec can attach a ``close`` attribute to it.
            def __init__( self: Optional[int] , UpperCamelCase__: Any ):
                lowerCamelCase__ : Optional[int] = file_
            def __enter__( self: List[Any] ):
                self._file.__enter__()
                return self
            def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
            def __iter__( self: Any ):
                return iter(self._file )
            def lowerCamelCase_ ( self: List[Any] ):
                return next(self._file )
            def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
                # Fall through to the wrapped file for everything else.
                return getattr(self._file , UpperCamelCase__ )

        def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
            # Replacement __enter__ returning the wrapped proxy instead of
            # the raw zstd reader.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )

        lowerCamelCase__ : Optional[Any] = fixed_enter
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): both bindings below target ``_A`` (rewrite artifact), so the
# logger is immediately shadowed by the URL map.
_A : Optional[Any] =logging.get_logger(__name__)
# RWKV checkpoint name -> hosted config.json URL.
_A : Optional[int] ={
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
    """Configuration class for RWKV models.

    NOTE(review): the constructor's parameter list repeats ``UpperCamelCase__``
    (invalid Python) and the body reads the original parameter names
    (``vocab_size`` etc.), which are never bound — compare with the original
    ``RwkvConfig`` before running.
    """
    # Model type identifier and attribute-name remapping used by transformers.
    a = """rwkv"""
    a = {"""max_position_embeddings""": """context_length"""}

    def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=50_277 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=4_096 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Dict=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=1e-5 , UpperCamelCase__: Any=0 , UpperCamelCase__: str=0 , UpperCamelCase__: Union[str, Any]=6 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ):
        lowerCamelCase__ : Dict = vocab_size
        lowerCamelCase__ : Optional[Any] = context_length
        lowerCamelCase__ : Optional[Any] = hidden_size
        lowerCamelCase__ : Any = num_hidden_layers
        # Attention and intermediate sizes default relative to hidden_size.
        lowerCamelCase__ : int = attention_hidden_size if attention_hidden_size is not None else hidden_size
        lowerCamelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
        lowerCamelCase__ : List[str] = layer_norm_epsilon
        lowerCamelCase__ : int = rescale_every
        lowerCamelCase__ : Optional[int] = use_cache
        lowerCamelCase__ : Dict = bos_token_id
        lowerCamelCase__ : Any = eos_token_id
        super().__init__(
            tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 631 | 0 |
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """Reflect a laser beam off the ellipse 4x^2 + y^2 = 100 (Project Euler 144).

    Given the current contact point ``(point_x, point_y)`` and the gradient of
    the incoming beam, return the next contact point and the gradient of the
    outgoing (reflected) beam.

    Fixes over the previous revision: the parameter list repeated a single
    name (a SyntaxError) and the function name did not match its caller.
    """
    # Gradient of the normal at (x, y): differentiating gives dy/dx = -4x/y,
    # so the normal's gradient is y / (4x).
    normal_gradient = point_y / 4 / point_x
    # Entries of the reflection (rotation) transform about the normal.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # Two roots; discard the one (numerically) equal to the current point.
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections before the beam escapes through the top gap.

    The beam enters at (0.0, 10.1), first strikes the ellipse at
    ``(first_x_coord, first_y_coord)``, and escapes when it hits the hole
    -0.01 <= x <= 0.01 at the top (y > 0).
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(F'{solution() = }')
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): both bindings target ``_A`` (rewrite artifact), so the logger
# is immediately shadowed by the URL map.
_A : str =logging.get_logger(__name__)
# RoCBert checkpoint name -> hosted config.json URL.
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
    """Configuration class for RoCBert models.

    NOTE(review): the constructor's parameter list repeats ``UpperCamelCase__``
    (invalid Python) and the body reads the original parameter names
    (``vocab_size`` etc.), which are never bound — compare with the original
    ``RoCBertConfig`` before running.
    """
    # Model type identifier used by the transformers config machinery.
    a = """roc_bert"""

    def __init__( self: Optional[Any] , UpperCamelCase__: Any=30_522 , UpperCamelCase__: Optional[Any]=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: Tuple=12 , UpperCamelCase__: Tuple=3_072 , UpperCamelCase__: str="gelu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: Dict=512 , UpperCamelCase__: str=2 , UpperCamelCase__: str=0.02 , UpperCamelCase__: Tuple=1e-12 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=0 , UpperCamelCase__: List[Any]="absolute" , UpperCamelCase__: Any=None , UpperCamelCase__: Any=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Union[str, Any]=768 , UpperCamelCase__: int=910 , UpperCamelCase__: Tuple=512 , UpperCamelCase__: int=24_858 , UpperCamelCase__: Optional[Any]=True , **UpperCamelCase__: Optional[Any] , ):
        # Standard BERT-style hyper-parameters.
        lowerCamelCase__ : Optional[Any] = vocab_size
        lowerCamelCase__ : Tuple = max_position_embeddings
        lowerCamelCase__ : List[Any] = hidden_size
        lowerCamelCase__ : int = num_hidden_layers
        lowerCamelCase__ : Tuple = num_attention_heads
        lowerCamelCase__ : Any = intermediate_size
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : str = hidden_dropout_prob
        lowerCamelCase__ : Dict = attention_probs_dropout_prob
        lowerCamelCase__ : Union[str, Any] = initializer_range
        lowerCamelCase__ : Tuple = type_vocab_size
        lowerCamelCase__ : Optional[Any] = layer_norm_eps
        lowerCamelCase__ : List[Any] = use_cache
        # RoCBert-specific: pronunciation/shape auxiliary embeddings.
        lowerCamelCase__ : Tuple = enable_pronunciation
        lowerCamelCase__ : Union[str, Any] = enable_shape
        lowerCamelCase__ : Union[str, Any] = pronunciation_embed_dim
        lowerCamelCase__ : Any = pronunciation_vocab_size
        lowerCamelCase__ : int = shape_embed_dim
        lowerCamelCase__ : Tuple = shape_vocab_size
        lowerCamelCase__ : Optional[Any] = concat_input
        lowerCamelCase__ : str = position_embedding_type
        lowerCamelCase__ : Dict = classifier_dropout
        super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowercase ( unittest.TestCase ):
    '''simple docstring'''
    # Model tester producing small RobertaPreLayerNorm configs and inputs.
    # NOTE(review): the constructor's parameter list repeats ``UpperCamelCase__``
    # (invalid Python) and the bodies read the original parameter names,
    # which are never bound — compare with the original tester before running.

    def __init__( self: int , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=13 , UpperCamelCase__: int=7 , UpperCamelCase__: int=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[int]=99 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Dict=5 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: Union[str, Any]="gelu" , UpperCamelCase__: str=0.1 , UpperCamelCase__: str=0.1 , UpperCamelCase__: List[str]=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: Union[str, Any]=2 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Union[str, Any]=4 , ):
        # Store the test hyper-parameters on the tester instance.
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Union[str, Any] = batch_size
        lowerCamelCase__ : Union[str, Any] = seq_length
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : Optional[Any] = use_attention_mask
        lowerCamelCase__ : List[Any] = use_token_type_ids
        lowerCamelCase__ : Dict = use_labels
        lowerCamelCase__ : List[Any] = vocab_size
        lowerCamelCase__ : Optional[int] = hidden_size
        lowerCamelCase__ : int = num_hidden_layers
        lowerCamelCase__ : List[str] = num_attention_heads
        lowerCamelCase__ : List[str] = intermediate_size
        lowerCamelCase__ : Optional[Any] = hidden_act
        lowerCamelCase__ : Tuple = hidden_dropout_prob
        lowerCamelCase__ : Tuple = attention_probs_dropout_prob
        lowerCamelCase__ : Optional[int] = max_position_embeddings
        lowerCamelCase__ : List[Any] = type_vocab_size
        lowerCamelCase__ : Tuple = type_sequence_label_size
        lowerCamelCase__ : Union[str, Any] = initializer_range
        lowerCamelCase__ : List[str] = num_choices

    def lowerCamelCase_ ( self: Optional[Any] ):
        """Build a config plus random ids / masks sized by the tester fields."""
        lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : List[Any] = None
        if self.use_attention_mask:
            lowerCamelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : Optional[Any] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : Dict = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def lowerCamelCase_ ( self: Tuple ):
        """Return (config, inputs_dict) for the common test mixin."""
        lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
        lowerCamelCase__ : Dict = config_and_inputs
        lowerCamelCase__ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def lowerCamelCase_ ( self: List[str] ):
        """Variant providing decoder-style inputs (encoder states + mask)."""
        lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        lowerCamelCase__ : str = config_and_inputs
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowercase ( _lowercase , unittest.TestCase ):
    '''simple docstring'''
    # NOTE(review): both class attributes are named ``a`` (rewrite artifact);
    # originally the flag and the model-class tuple had distinct names.
    a = True
    a = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def lowerCamelCase_ ( self: Optional[Any] ):
        """Set up the shared model tester."""
        lowerCamelCase__ : List[Any] = FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def lowerCamelCase_ ( self: str ):
        """Smoke test: every model class loads from the hub and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : int = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class _lowercase ( unittest.TestCase ):
    '''simple docstring'''
    # Integration tests pinning model outputs against recorded values.
    # NOTE(review): locals were mangled by an automated rewrite — ``model``
    # and ``output`` are read but assignments target ``lowerCamelCase__``.

    @slow
    def lowerCamelCase_ ( self: str ):
        """Masked-LM head: check logits shape and a pinned 3x3 slice."""
        lowerCamelCase__ : List[Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0]
        lowerCamelCase__ : Dict = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , UpperCamelCase__ )
        # compare the actual values for a slice.
        lowerCamelCase__ : Union[str, Any] = np.array(
            [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )

    @slow
    def lowerCamelCase_ ( self: str ):
        """Base model: check a pinned 3x3 slice of the hidden states."""
        lowerCamelCase__ : Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=UpperCamelCase__ )
        lowerCamelCase__ : Any = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        lowerCamelCase__ : List[str] = model(UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        lowerCamelCase__ : Optional[int] = np.array(
            [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment joining *pa* and *pb*.

    Fixes over the previous revision: the parameter list repeated a single
    name (a SyntaxError), the formula averaged the first point with itself
    (returning *pa* unchanged), and the function name did not match its
    caller ``get_mid``.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle with the module-level ``my_pen`` turtle, then recurse
    into the three corner sub-triangles (Sierpinski construction).

    Fixes over the previous revision: the parameter list repeated a single
    name (a SyntaxError), and the function was defined under a name that did
    not match its recursive calls to ``triangle``.

    NOTE(review): relies on module globals ``my_pen`` and ``get_mid`` being
    bound before the first call.
    """
    # Trace the triangle outline: lift the pen, move to the first vertex,
    # then draw through the other two and back to the start.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # Recurse into the three sub-triangles formed by the edge midpoints.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    # Usage guard: exactly one integer argument selects the recursion depth.
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    # NOTE(review): the pen is bound to ``_A`` but used as ``my_pen`` below,
    # and the vertex list is bound to ``_A`` but read as ``vertices``
    # (rewrite artifacts).
    _A : Any =turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    _A : Dict =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_A : Any =TypeVar('''T''')
class _lowercase ( Generic[T] ):
    """Segment tree over an associative combining function.

    NOTE(review): this block was mangled by an automated rewrite — duplicate
    ``UpperCamelCase__`` parameter names (invalid Python), assignments that
    target ``lowerCamelCase__`` while later lines read ``any_type`` / ``fnc``
    / ``v`` / ``l`` / ``r``, and tuples assigned to a single name. Compare
    with the original implementation before executing.
    """

    def __init__( self: Optional[Any] , UpperCamelCase__: list[T] , UpperCamelCase__: Callable[[T, T], T] ):
        lowerCamelCase__ : Any | T = None
        # Flat 2N array layout: internal nodes in [1, N), leaves in [N, 2N).
        lowerCamelCase__ : int = len(UpperCamelCase__ )
        lowerCamelCase__ : list[T] = [any_type for _ in range(self.N )] + arr
        lowerCamelCase__ : Any = fnc
        self.build()

    def lowerCamelCase_ ( self: List[Any] ):
        """Build internal nodes bottom-up: parent p combines children 2p and 2p+1."""
        for p in range(self.N - 1 , 0 , -1 ):
            lowerCamelCase__ : Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: T ):
        """Point update at index p, then re-combine all ancestors up to the root."""
        p += self.N
        lowerCamelCase__ : int = v
        while p > 1:
            lowerCamelCase__ : str = p // 2
            lowerCamelCase__ : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: int , UpperCamelCase__: int ): # noqa: E741
        """Inclusive range query over [l, r], walking inward from both ends."""
        # NOTE(review): originally ``l, r = l + self.N, r + self.N``; the
        # rewrite collapsed the two assignment targets into one name.
        lowerCamelCase__ : Dict = l + self.N, r + self.N
        lowerCamelCase__ : T | None = None
        while l <= r:
            if l % 2 == 1:
                # l is a right child: include it and step right.
                lowerCamelCase__ : str = self.st[l] if res is None else self.fn(UpperCamelCase__ , self.st[l] )
            if r % 2 == 0:
                # r is a left child: include it and step left.
                lowerCamelCase__ : List[Any] = self.st[r] if res is None else self.fn(UpperCamelCase__ , self.st[r] )
            lowerCamelCase__ : int = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    # Self-test harness: compare every range query against functools.reduce.
    # NOTE(review): every binding below targets ``_A`` (rewrite artifact);
    # later lines read ``test_array``, ``test_updates`` and the three
    # ``*_segment_tree`` names, which are never bound under those names.
    _A : Any =[1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    _A : Union[str, Any] ={
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    _A : int =SegmentTree(test_array, min)
    _A : Tuple =SegmentTree(test_array, max)
    _A : Optional[int] =SegmentTree(test_array, lambda a, b: a + b)

    def SCREAMING_SNAKE_CASE_ () -> None:
        """Exhaustively check every [i, j] query against a reduce over the slice.

        NOTE(review): the rewrite replaced loop variables and the reduce
        functions with ``UpperCamelCase`` (undefined) — originally these were
        ``i``/``j`` and ``min``/``max``/the sum lambda.
        """
        for i in range(len(UpperCamelCase ) ):
            for j in range(UpperCamelCase , len(UpperCamelCase ) ):
                lowerCamelCase__ : Optional[int] = reduce(UpperCamelCase , test_array[i : j + 1] )
                lowerCamelCase__ : Tuple = reduce(UpperCamelCase , test_array[i : j + 1] )
                lowerCamelCase__ : Optional[int] = reduce(lambda UpperCamelCase , UpperCamelCase : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(UpperCamelCase , UpperCamelCase )
                assert max_range == max_segment_tree.query(UpperCamelCase , UpperCamelCase )
                assert sum_range == sum_segment_tree.query(UpperCamelCase , UpperCamelCase )

    test_all_segments()

    # Apply the point updates, then re-verify every range.
    for index, value in test_updates.items():
        _A : Optional[Any] =value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ):
        """Set the tester's hyper-parameters.

        Every setting below is hard-coded rather than taken from the
        arguments (the keyword parameters are accepted but never read).
        NOTE(review): the parameter list repeats ``UpperCamelCase__`` (invalid
        Python) and ``parent`` is read but never bound under that name.
        """
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Union[str, Any] = 13
        lowerCamelCase__ : Any = 7
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[Any] = True
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : List[str] = True
        lowerCamelCase__ : str = 99
        lowerCamelCase__ : Dict = 384
        lowerCamelCase__ : Optional[Any] = 2
        lowerCamelCase__ : Optional[int] = 4
        lowerCamelCase__ : Optional[Any] = 37
        lowerCamelCase__ : Union[str, Any] = """gelu"""
        lowerCamelCase__ : int = 0.1
        lowerCamelCase__ : Optional[Any] = 0.1
        lowerCamelCase__ : List[Any] = 512
        lowerCamelCase__ : Optional[Any] = 16
        lowerCamelCase__ : Any = 2
        lowerCamelCase__ : Optional[Any] = 0.02
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : Any = 128
        lowerCamelCase__ : List[Any] = 2
        lowerCamelCase__ : Optional[Any] = 9
        lowerCamelCase__ : Any = 1
        lowerCamelCase__ : Optional[int] = None
    def lowerCamelCase_ ( self: List[Any] ):
        """Build a ConvBert config plus random input tensors and labels.

        NOTE(review): locals were mangled by an automated rewrite — every
        assignment targets ``lowerCamelCase__``, but the returned names
        (``input_ids`` etc.) are never bound here.
        """
        lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : str = None
        if self.use_input_mask:
            lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        # Optional labels for sequence/token classification and multiple choice.
        lowerCamelCase__ : int = None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : List[Any] = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Any ):
        """Base model forward pass (dict and list inputs); checks output shape.

        NOTE(review): the parameter list repeats ``UpperCamelCase__`` (invalid
        Python) and ``input_ids`` / ``input_mask`` / ``token_type_ids`` /
        ``result`` are read but never bound under those names.
        """
        lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ )
        lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase__ : List[str] = [input_ids, input_mask]
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
        """Masked-LM head: logits should be (batch, seq_len, vocab_size).

        NOTE(review): duplicate ``UpperCamelCase__`` parameters (invalid
        Python); ``input_ids`` / ``result`` etc. are read but never bound.
        """
        lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
        """Sequence-classification head: logits should be (batch, num_labels).

        NOTE(review): duplicate ``UpperCamelCase__`` parameters (invalid
        Python); ``input_ids`` / ``result`` etc. are read but never bound.
        """
        lowerCamelCase__ : int = self.num_labels
        lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
        # Check the multiple-choice head. Inputs are tiled along a new axis so each
        # example is repeated once per choice; logits must be (batch, num_choices).
        # NOTE(review): names mangled by obfuscation — see sibling methods.
        lowerCamelCase__ : Optional[int] = self.num_choices
        lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ )
        lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Tuple = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ):
        # Check the token-classification head: logits must be
        # (batch, seq_len, num_labels).
        # NOTE(review): names mangled by obfuscation — see sibling methods.
        lowerCamelCase__ : List[Any] = self.num_labels
        lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
        # Check the QA head: start/end logits must each be (batch, seq_len).
        # NOTE(review): names mangled by obfuscation — see sibling methods.
        lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ )
        lowerCamelCase__ : int = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : str = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-suite test class for the TFConvBert model family.

    NOTE(review): identifier obfuscation renamed the class, its bases and its
    class attributes (all three config flags are assigned to the same name
    ``a``); the original names (e.g. ``all_model_classes``,
    ``pipeline_model_mapping``, ``test_head_masking``) must be restored from
    upstream for this to run.
    """

    # Model classes exercised by the shared tests (original: all_model_classes).
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping (original: pipeline_model_mapping).
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags for the common suite (originals presumably
    # test_head_masking / test_pruning / test_onnx — verify upstream).
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: str ):
        # Set up the model tester and the generic config tester.
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: List[str] ):
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Round-trip each model through a TF SavedModel and verify that the
        # reloaded model still emits hidden states and attentions with the
        # expected counts and shapes.
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                # SavedModel layout: <dir>/saved_model/1
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                # One hidden state per layer plus the embeddings output.
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                # ConvBert halves the attention-head count via its head ratio,
                # hence num_attention_heads / 2 here.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def lowerCamelCase_ ( self: int ):
        # Smoke test: the public checkpoint loads.
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        # Verify attention outputs: shape, count, and that they can be enabled
        # both per-call and via the config.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )

        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            # Decoder attentions: one tensor per layer, head count halved by
            # ConvBert's head ratio.
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            # Encoder (or plain-model) attentions: same per-layer shape check.
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    """Integration test: run the public ConvBert checkpoint on a fixed input and
    compare a slice of the output against pre-recorded values."""

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        # Tiny deterministic input: one sequence of six token ids.
        lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0]
        lowerCamelCase__ : Dict = [1, 6, 768]
        self.assertEqual(output.shape , UpperCamelCase__ )
        # Golden values recorded from a known-good run of the checkpoint.
        lowerCamelCase__ : Dict = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
| 631 | 0 |
'''simple docstring'''
from timeit import timeit
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
    """Return the number of set bits (population count) of a non-negative int.

    :param UpperCamelCase: the integer whose set bits are counted.
    :raises ValueError: if the input is negative.
    :return: count of 1-bits in the binary representation.
    """
    if UpperCamelCase < 0:
        raise ValueError("""the value of input must not be negative""" )
    # bin() yields e.g. '0b11001'; counting '1' characters gives the popcount.
    return bin(UpperCamelCase ).count("""1""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
    """Return the number of set bits of a non-negative int by scanning bits.

    :param UpperCamelCase: the integer whose set bits are counted.
    :raises ValueError: if the input is negative.
    :return: count of 1-bits in the binary representation.
    """
    if UpperCamelCase < 0:
        raise ValueError("""the value of input must not be negative""" )
    bit_count = 0
    # Accumulate the lowest bit each round, then shift it out.
    while UpperCamelCase:
        bit_count += UpperCamelCase & 1
        UpperCamelCase >>= 1
    return bit_count
def SCREAMING_SNAKE_CASE_ () -> None:
    """Benchmark the two popcount implementations over a few sample inputs.

    NOTE(review): obfuscation broke several names here — the functions called
    (``get_set_bits_count_using_modulo_operator`` /
    ``..._brian_kernighans_algorithm``) were both renamed to
    ``SCREAMING_SNAKE_CASE_`` above, and ``do_benchmark(UpperCamelCase )`` below
    should presumably pass ``number``. Restore from upstream before running.
    """

    def do_benchmark(UpperCamelCase ) -> None:
        # timeit runs the statement in a fresh namespace; import this module as
        # ``z`` so the benchmarked functions are reachable there.
        lowerCamelCase__ : Union[str, Any] = """import __main__ as z"""
        print(f'''Benchmark when {number = }:''' )
        print(f'''{get_set_bits_count_using_modulo_operator(UpperCamelCase ) = }''' )
        lowerCamelCase__ : Union[str, Any] = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=UpperCamelCase )
        print(f'''timeit() runs in {timing} seconds''' )
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(UpperCamelCase ) = }''' )
        lowerCamelCase__ : int = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=UpperCamelCase , )
        print(f'''timeit() runs in {timing} seconds''' )

    for number in (25, 37, 58, 0):
        do_benchmark(UpperCamelCase )
        print()
if __name__ == "__main__":
    # Run the module's doctests, then the benchmark.
    # NOTE(review): ``benchmark`` is not defined under that name in this file —
    # the benchmark function above was renamed to ``SCREAMING_SNAKE_CASE_`` by
    # obfuscation; restore the original name before running.
    import doctest

    doctest.testmod()
    benchmark()
| 718 |
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_A : Tuple ='''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_A : Union[str, Any] ='''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_A : Optional[Any] =r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    """`datasets` metric wrapping ``scipy.stats.spearmanr``.

    NOTE(review): the citation constant referenced as ``_CITATION`` was renamed
    to ``_A`` by obfuscation; verify against upstream.
    """

    def lowerCamelCase_ ( self: Any ):
        # Declare the metric's schema: float predictions vs float references.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float""" ),
                    """references""": datasets.Value("""float""" ),
                } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )

    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any]=False ):
        # Compute the Spearman rank correlation; optionally include the p-value.
        # NOTE(review): ``return_pvalue``/``results`` rely on the original,
        # unmangled parameter/local names — verify upstream.
        lowerCamelCase__ : Optional[Any] = spearmanr(UpperCamelCase__ , UpperCamelCase__ )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : Dict ={
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( _lowercase ):
    """Configuration class for the TrOCR decoder (a PretrainedConfig subclass).

    NOTE(review): the three class attributes below were all renamed to ``a`` by
    obfuscation; upstream they are ``model_type``, ``keys_to_ignore_at_inference``
    and ``attribute_map``. The ``__init__`` body likewise assigns from the
    original parameter names (``vocab_size`` etc.) which the mangled signature no
    longer binds — restore from upstream before use.
    """

    a = """trocr"""
    a = ["""past_key_values"""]
    a = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__( self: Optional[Any] , UpperCamelCase__: int=50_265 , UpperCamelCase__: int=1_024 , UpperCamelCase__: Optional[Any]=12 , UpperCamelCase__: Dict=16 , UpperCamelCase__: int=4_096 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: int=512 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Any=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: str=True , UpperCamelCase__: Tuple=False , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Dict=1 , UpperCamelCase__: List[str]=0 , UpperCamelCase__: Union[str, Any]=2 , **UpperCamelCase__: str , ):
        # Store decoder hyperparameters, then delegate special-token ids and any
        # remaining kwargs to the PretrainedConfig base initializer.
        lowerCamelCase__ : Any = vocab_size
        lowerCamelCase__ : Tuple = d_model
        lowerCamelCase__ : Any = decoder_layers
        lowerCamelCase__ : Dict = decoder_attention_heads
        lowerCamelCase__ : str = decoder_ffn_dim
        lowerCamelCase__ : Tuple = activation_function
        lowerCamelCase__ : Dict = max_position_embeddings
        lowerCamelCase__ : int = dropout
        lowerCamelCase__ : int = attention_dropout
        lowerCamelCase__ : List[Any] = activation_dropout
        lowerCamelCase__ : Union[str, Any] = init_std
        lowerCamelCase__ : Optional[int] = decoder_layerdrop
        lowerCamelCase__ : Dict = use_cache
        lowerCamelCase__ : Any = scale_embedding
        lowerCamelCase__ : Optional[int] = use_learned_position_embeddings
        lowerCamelCase__ : List[str] = layernorm_embedding
        super().__init__(
            pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
| 631 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _lowercase ( _lowercase ):
    """Unit tests for RealmRetriever: tokenizer + block-record retrieval,
    answer-span detection, and save/load round-trips.

    NOTE(review): identifier obfuscation assigns every local to
    ``lowerCamelCase__`` and every parameter to ``UpperCamelCase__``; references
    such as ``self.tmpdirname``, ``vocab_tokens``, ``retriever`` and the
    ``assertEqual(len(UpperCamelCase__ ) , ...)`` calls rely on the original,
    unmangled names — verify against upstream.
    """

    def lowerCamelCase_ ( self: int ):
        # Build a temp dir holding a tiny WordPiece vocab and an (empty) block
        # records directory used by the retriever fixtures below.
        lowerCamelCase__ : Optional[int] = tempfile.mkdtemp()
        lowerCamelCase__ : int = 5
        # Realm tok
        lowerCamelCase__ : List[str] = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """test""",
            """question""",
            """this""",
            """is""",
            """the""",
            """first""",
            """second""",
            """third""",
            """fourth""",
            """fifth""",
            """record""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        lowerCamelCase__ : List[str] = os.path.join(self.tmpdirname , """realm_tokenizer""" )
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        lowerCamelCase__ : str = os.path.join(self.tmpdirname , """realm_block_records""" )
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Fixture: tokenizer backed by the temp vocab written in setUp.
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Teardown: remove the temp directory.
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Fixture: a RealmConfig sized to the dummy block records.
        lowerCamelCase__ : int = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCamelCase_ ( self: Any ):
        # Fixture: a tiny QA dataset (unused by most tests here).
        lowerCamelCase__ : Tuple = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """question""": ["""foo""", """bar"""],
                """answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
            } )
        return dataset

    def lowerCamelCase_ ( self: int ):
        # Fixture: six byte-string evidence blocks.
        lowerCamelCase__ : Union[str, Any] = np.array(
            [
                b"""This is the first record""",
                b"""This is the second record""",
                b"""This is the third record""",
                b"""This is the fourth record""",
                b"""This is the fifth record""",
                b"""This is a longer longer longer record""",
            ] , dtype=UpperCamelCase__ , )
        return block_records

    def lowerCamelCase_ ( self: Any ):
        # Fixture: retriever over the dummy block records + temp tokenizer.
        lowerCamelCase__ : str = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCamelCase_ ( self: Tuple ):
        # Retrieve two blocks for one question and check the concatenated
        # (question + evidence) encodings: shapes and decoded token sequences.
        lowerCamelCase__ : int = self.get_config()
        lowerCamelCase__ : Tuple = self.get_dummy_retriever()
        lowerCamelCase__ : Any = retriever.tokenizer
        lowerCamelCase__ : int = np.array([0, 3] , dtype="""long""" )
        lowerCamelCase__ : List[Any] = tokenizer(["""Test question"""] ).input_ids
        lowerCamelCase__ : Dict = tokenizer(
            ["""the fourth"""] , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ).input_ids
        lowerCamelCase__ : int = config.reader_seq_len
        lowerCamelCase__ : Tuple = retriever(
            UpperCamelCase__ , UpperCamelCase__ , answer_ids=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="""np""" )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )

    def lowerCamelCase_ ( self: str ):
        # Check answer-span detection: has_answers flags plus start/end positions
        # for three retrieved blocks (block 0 has no answer; -1 marks padding).
        lowerCamelCase__ : Tuple = self.get_config()
        lowerCamelCase__ : Tuple = self.get_dummy_retriever()
        lowerCamelCase__ : List[Any] = retriever.tokenizer
        lowerCamelCase__ : str = np.array([0, 3, 5] , dtype="""long""" )
        lowerCamelCase__ : Dict = tokenizer(["""Test question"""] ).input_ids
        lowerCamelCase__ : str = tokenizer(
            ["""the fourth""", """longer longer"""] , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ).input_ids
        lowerCamelCase__ : Dict = config.reader_seq_len
        lowerCamelCase__ : Union[str, Any] = retriever(
            UpperCamelCase__ , UpperCamelCase__ , answer_ids=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="""np""" )
        self.assertEqual([False, True, True] , UpperCamelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCamelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # Round-trip the retriever through save_pretrained/from_pretrained, both
        # from a local path and via a mocked hub download.
        lowerCamelCase__ : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
        # Test local path
        lowerCamelCase__ : Tuple = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
        self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
        # Test mocked remote path
        with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
            lowerCamelCase__ : Union[str, Any] = os.path.join(
                os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
            lowerCamelCase__ : Optional[int] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
            self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool:
    """Check whether an undirected graph is bipartite using DFS 2-coloring.

    :param UpperCamelCase: adjacency list as a dict mapping vertex ``i`` (for
        ``i`` in ``range(len(graph))``) to its list of neighbours.
    :return: ``True`` if the graph is 2-colorable (bipartite), else ``False``.
    """
    # Bug fix: the obfuscated original never wrote ``visited[v]``/``color[v]``
    # (assignments went to a throwaway name, causing infinite recursion) and
    # recursed/iterated on the graph object instead of the vertex.
    visited = [False] * len(UpperCamelCase )
    color = [-1] * len(UpperCamelCase )

    def dfs(v, c):
        # Color vertex v with c, then give every unvisited neighbour the
        # opposite color (1 - c).
        visited[v] = True
        color[v] = c
        for u in UpperCamelCase[v]:
            if not visited[u]:
                dfs(u, 1 - c )

    # Color every connected component.
    for i in range(len(UpperCamelCase ) ):
        if not visited[i]:
            dfs(i, 0 )

    # Bipartite iff no edge joins two same-colored vertices.
    for i in range(len(UpperCamelCase ) ):
        for j in UpperCamelCase[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# NOTE(review): obfuscation broke both names on the next line —
# ``check_bipartite_dfs`` was renamed to ``SCREAMING_SNAKE_CASE_`` above and
# ``graph`` to ``_A``; restore before running.
print(check_bipartite_dfs(graph))
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> list:
    """Sort a list of numbers with bucket sort.

    One bucket per integer offset from the minimum value; each bucket is sorted
    individually and the buckets are concatenated in order.

    :param UpperCamelCase: list of numbers to sort.
    :return: a new sorted list; the input is not modified.
    """
    if len(UpperCamelCase ) == 0:
        return []
    # Bug fix: the obfuscated original assigned the (min, max) pair to a single
    # throwaway name — leaving ``min_value``/``max_value`` undefined — and
    # passed the input list itself to range() when sizing the buckets.
    min_value, max_value = min(UpperCamelCase ), max(UpperCamelCase )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for value in UpperCamelCase:
        buckets[int(value - min_value )].append(value )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
    # Run doctests, then sanity-check two fixed inputs.
    # NOTE(review): ``bucket_sort`` is not defined under that name in this file —
    # the function above was renamed to ``SCREAMING_SNAKE_CASE_`` by obfuscation;
    # restore the original name before running.
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
    """Map-style dataset that applies a preprocessing callable to each item of a
    wrapped dataset on access.

    NOTE(review): obfuscation collapsed the parameters into ``UpperCamelCase__``
    and the attribute assignments into ``lowerCamelCase__``; the attributes read
    below (``self.dataset``, ``self.process``, ``self.params``) rely on the
    original names — verify upstream.
    """

    def __init__( self: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] ):
        # Store the wrapped dataset, the per-item preprocessing callable, and
        # the keyword arguments forwarded to it.
        lowerCamelCase__ : Optional[int] = dataset
        lowerCamelCase__ : Optional[int] = process
        lowerCamelCase__ : List[str] = params

    def __len__( self: List[str] ):
        # Same length as the wrapped dataset.
        return len(self.dataset )

    def __getitem__( self: Any , UpperCamelCase__: int ):
        # Fetch the raw item and run the preprocessing callable on it.
        lowerCamelCase__ : Dict = self.dataset[i]
        lowerCamelCase__ : Union[str, Any] = self.process(UpperCamelCase__ , **self.params )
        return processed
class _lowercase ( _lowercase ):
    """Iterable that applies an inference callable to items from a DataLoader,
    optionally unrolling loader batches back into single items so downstream
    code can pretend batch_size == 1.

    NOTE(review): obfuscation collapsed attribute/local assignments into
    ``lowerCamelCase__``; attributes read below (``self.loader``, ``self.infer``,
    ``self.params``, ``self.loader_batch_size``, ``self._loader_batch_data``,
    ``self._loader_batch_index``, ``self.iterator``) rely on the original names
    — verify upstream.
    """

    def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
        lowerCamelCase__ : int = loader
        lowerCamelCase__ : str = infer
        lowerCamelCase__ : Optional[int] = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : int = loader_batch_size
        # Internal bookkeeping
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None

    def __len__( self: Dict ):
        return len(self.loader )

    def __iter__( self: Optional[int] ):
        lowerCamelCase__ : List[Any] = iter(self.loader )
        return self

    def lowerCamelCase_ ( self: Any ):
        # Extract item ``self._loader_batch_index`` from the currently stored
        # batch, re-wrapped so it looks like a batch of size 1.
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowerCamelCase__ : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Convert ModelOutput to tuple first
                    lowerCamelCase__ : str = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowerCamelCase__ : List[str] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowerCamelCase__ : str = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
        self._loader_batch_index += 1
        return result

    def lowerCamelCase_ ( self: List[Any] ):
        # __next__: either continue unrolling the stored batch, or pull the next
        # batch from the loader, run inference, and (if batched) start unrolling.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        lowerCamelCase__ : Optional[Any] = next(self.iterator )
        lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(UpperCamelCase__ , torch.Tensor ):
                lowerCamelCase__ : Optional[Any] = processed
            else:
                lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
                lowerCamelCase__ : Any = processed[key]
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    lowerCamelCase__ : Any = len(UpperCamelCase__ )
                else:
                    lowerCamelCase__ : List[str] = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowerCamelCase__ : Union[str, Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            lowerCamelCase__ : List[Any] = processed
            lowerCamelCase__ : List[Any] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Pipeline iterator for chunked pipelines: each input item yields a
    sub-iterator of chunks, and the chunks are flattened into one stream.

    NOTE(review): obfuscation collapsed assignments into ``lowerCamelCase__``;
    ``self.iterator``/``self.subiterator`` rely on the original names — verify
    upstream.
    """

    def __init__( self: List[str] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any]=None ):
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def __iter__( self: Union[str, Any] ):
        # Fresh iteration: reset the loader iterator and drop any in-progress
        # sub-iterator.
        lowerCamelCase__ : str = iter(self.loader )
        lowerCamelCase__ : int = None
        return self

    def lowerCamelCase_ ( self: str ):
        # __next__: drain the current item's chunk sub-iterator; when it is
        # exhausted, feed the next input item to ``infer`` and continue.
        if self.subiterator is None:
            lowerCamelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            lowerCamelCase__ : Tuple = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            lowerCamelCase__ : Any = self.infer(next(self.iterator ) , **self.params )
            lowerCamelCase__ : Union[str, Any] = next(self.subiterator )
        return processed
class _lowercase ( _lowercase ):
    """Pipeline iterator that re-groups flattened chunk results.

    Accumulates items (possibly while unbatching) until it hits an item
    whose ``is_last`` flag is set, then returns the accumulated list so
    that `process` and `postprocess` see the same boundaries.

    Fix: the obfuscated version left ``is_last``/``accumulator``/``item``/
    ``processed`` and the loader-batch state unbound.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def lowerCamelCase_(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # Finish unrolling the batch we were in the middle of.
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Infer the observed batch size from the first tensor/list.
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # Could be last batch so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("""is_last""")
                accumulator.append(item)
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset wrapper exposing a single ``key`` of each underlying item.

    Fixes: ``__init__`` duplicated its parameter names (a SyntaxError) and
    never stored the attributes; ``__getitem__`` referenced an undefined
    ``i`` while its parameter had another name.
    """

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset wrapper exposing two keys of each item as
    ``{"text": ..., "text_pair": ...}`` (text-pair tasks).

    Fixes: the obfuscation collapsed the two key parameters into one
    duplicated name (a SyntaxError) and ``__getitem__`` used an undefined
    ``i``.
    """

    def __init__(self, dataset: Dataset, keya: str, keyb: str):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 631 | 0 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE_(filename=None) -> int:
    """Project Euler problem 22: sum of all name scores in the names file.

    A name's score is the sum of its letters' alphabetical values
    (A=1 ... Z=26) multiplied by the name's 1-based position in the
    alphabetically sorted list.

    :param filename: path to the names file; defaults to
        ``p022_names.txt`` next to this script (backward compatible).
    :return: the grand total of all name scores.
    """
    if filename is None:
        filename = os.path.dirname(__file__) + """/p022_names.txt"""
    with open(filename) as file:
        names = str(file.readlines()[0])
    # The file is a single line of comma-separated, double-quoted names.
    names = names.replace("""\"""", """""").split(""",""")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # ord("A") == 65, hence the -64 to map A -> 1.
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score
if __name__ == "__main__":
    # Fix: the guard called an undefined `solution`; the solver in this file
    # is the (auto-renamed) SCREAMING_SNAKE_CASE_ function.
    print(SCREAMING_SNAKE_CASE_())
| 700 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
# Dump basic information about the Python / Torch / transformers environment
# (typically pasted into bug reports).
import os
import platform
import sys

# NOTE(review): this looks like it was meant to silence TensorFlow logging
# (os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"); as written it is an unused
# module-level binding and `os` is otherwise unused — confirm the intent.
_A : str ='''3'''

print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    # Torch is optional: report None instead of failing.
    print('''Torch version:''', None)

try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    # transformers is optional as well.
    print('''transformers version:''', None)
| 631 | 0 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
# Fix: all five module constants were bound to the same throwaway name `_A`,
# while the decorator and methods below reference `logger`, `_CITATION`,
# `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `CHECKPOINT_URLS` (NameError).
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

_KWARGS_DESCRIPTION = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

# Mapping of supported checkpoint names to their download URLs.
CHECKPOINT_URLS = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    # NOTE(review): the three methods below all share the name
    # `lowerCamelCase_`, so only the last definition is reachable on the
    # class; they look like the datasets.Metric hooks `_info`,
    # `_download_and_prepare` and `_compute` — confirm before renaming.

    def lowerCamelCase_(self):
        """Build the MetricInfo describing this metric's inputs/outputs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="""https://github.com/google-research/bleurt""",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }
            ),
            codebase_urls=["""https://github.com/google-research/bleurt"""],
            reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""],
        )

    def lowerCamelCase_(self, dl_manager):
        """Resolve the checkpoint named by ``self.config_name``, download it
        and build the BLEURT scorer.

        Fixes: ``checkpoint_name``/``model_path``/``self.scorer`` were never
        bound, and the checkpoint lookup used ``if`` instead of ``elif`` so
        the "default" branch fell through to the KeyError.
        """
        # Check that config name specifies a valid BLEURT model.
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            checkpoint_name = """bleurt-base-128"""
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # Download the model checkpoint specified by self.config_name and set up the scorer.
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def lowerCamelCase_(self, predictions, references):
        """Score ``predictions`` against ``references`` with BLEURT."""
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Fix: the import structure and the torch-only symbol list were both bound to
# `_A` (so the list was never merged), `_import_structure` referenced at the
# bottom was undefined, and the `_LazyModule` proxy was never installed in
# `sys.modules` — the standard lazy-init pattern is restored.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: simply do not register the torch-only modules.
    pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomised in-place quicksort of ``a[start:end + 1]``.

    Returns the number of comparisons performed while partitioning.

    Fix: both functions in this file were obfuscated to the same name while
    the recursion and the script below call ``_in_place_quick_sort`` /
    ``_in_place_partition``; all swap assignments were bound to throwaway
    locals instead of the array slots, and the tuple return was collapsed.
    """
    count = 0
    if start < end:
        # Move a random pivot to the end before partitioning.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Lomuto partition of ``a[start:end + 1]`` around a random pivot.

    Returns ``(final_pivot_index, comparison_count)``.
    """
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    # Put the pivot into its final position.
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
# Fix: every binding below went to `_A`, so `outfile`, `mu`, `sigma`, `p`,
# `X`, `M`, `r` and `z` were all undefined where used, and the
# (mu, sigma) tuple unpacking was collapsed.
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] =logging.get_logger(__name__)
_A : List[str] ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _lowercase ( _lowercase ):
a = """audio-spectrogram-transformer"""
def __init__( self: str , UpperCamelCase__: Any=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: List[Any]=12 , UpperCamelCase__: int=3_072 , UpperCamelCase__: Optional[Any]="gelu" , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Dict=1e-12 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=10 , UpperCamelCase__: List[str]=10 , UpperCamelCase__: Any=1_024 , UpperCamelCase__: Optional[Any]=128 , **UpperCamelCase__: Union[str, Any] , ):
super().__init__(**UpperCamelCase__ )
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : List[Any] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : List[Any] = patch_size
lowerCamelCase__ : List[str] = qkv_bias
lowerCamelCase__ : Dict = frequency_stride
lowerCamelCase__ : List[Any] = time_stride
lowerCamelCase__ : str = max_length
lowerCamelCase__ : Dict = num_mel_bins
| 631 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( ABC ):
    """Abstract base class for CLI sub-commands.

    Fix: the base class was the self-referential `_lowercase` (a NameError
    at class-creation time); `ABC` is imported above and otherwise unused,
    so it is clearly the intended base.

    NOTE(review): both abstract methods share the name `lowerCamelCase_`
    (the second shadows the first); they look like a parser-registration
    hook and a `run` entry point — restore the names upstream.
    """

    @staticmethod
    @abstractmethod
    def lowerCamelCase_ ( UpperCamelCase__: ArgumentParser ):
        # Register this command's arguments on the given (sub-)parser.
        raise NotImplementedError()

    @abstractmethod
    def lowerCamelCase_ ( self: List[Any] ):
        # Execute the command.
        raise NotImplementedError()
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Fix: these four constants were all bound to `_A`, while the functions
# below reference REPLACE_PATTERNS, REPLACE_FILES, the examples path and the
# README path (NameError).
PATH_TO_EXAMPLES = '''examples/'''

# Per-context (pattern, replacement) pairs used to rewrite version strings.
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}

# Files whose pinned version must be updated on every release.
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in ``fname`` using the (regex, template)
    pair registered for ``pattern`` in ``REPLACE_PATTERNS``.

    Fix: the obfuscated signature duplicated all parameter names (a
    SyntaxError) and the intermediates were bound to throwaway locals, so
    the substituted text was never written back. The name is grounded by
    the call sites below.
    """
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    """Apply the ``examples`` version pattern to every maintained example.

    Fix: the walk root and the joined path components were undefined names.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    """Update the version everywhere it is pinned; skip the examples for a
    patch release (the name is grounded by the call sites below)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main` documentation links by stable links in the README's
    model list (fix: the prompts, README path, ``lines`` and indices were
    all bound to throwaway locals)."""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1

    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the transformers ``__init__`` file."""
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Interactively bump the version for a release (optionally a patch).

    Fix: ``default_version``/``version`` were never bound and the final
    calls passed undefined names.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version

    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    """Interactively bump to the next ``.dev0`` version after a release."""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version

    print(F'''Updating version to {version}.''')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to `_A`, leaving `parser`
    # and `args` undefined where used.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( _lowercase ):
    # NOTE(review): both class attributes were obfuscated to the same name
    # `a` (so only the second survived) while the methods read
    # `self.scheduler_classes`; that name is restored from the call sites.
    # `num_inference_steps` follows the scheduler-test convention of the
    # base class — confirm against the original test file.
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    # NOTE(review): the remaining methods all share the name
    # `lowerCamelCase_` (only the last binding survives); they look like
    # unittest `test_*` methods that lost their names — restore upstream.

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs, overridable through ``kwargs``.

        Fix: the config dict was bound to a throwaway local, so both
        ``config.update`` and ``return config`` hit unbound names. The
        method name is grounded by the ``self.get_scheduler_config()``
        call sites below.
        """
        config = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**kwargs)
        return config

    def lowerCamelCase_(self):
        """Stepping at two different timesteps must preserve the sample shape."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)
        timestep_a = scheduler.timesteps[0]
        timestep_b = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_a = scheduler.step(residual, timestep_a, sample).prev_sample
        output_b = scheduler.step(residual, timestep_b, sample).prev_sample

        self.assertEqual(output_a.shape, sample.shape)
        self.assertEqual(output_a.shape, output_b.shape)

    def lowerCamelCase_(self):
        """Config check across several training-timestep counts."""
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def lowerCamelCase_(self):
        """Config check for both clip_denoised settings."""
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def lowerCamelCase_(self):
        """Single-step full loop: check deterministic output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7_614) < 1e-2
        assert abs(result_mean.item() - 0.2_510) < 1e-3

    def lowerCamelCase_(self):
        """Multistep full loop with custom timesteps [106, 0]."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6_357) < 1e-2
        assert abs(result_mean.item() - 0.4_527) < 1e-3

    def lowerCamelCase_(self):
        """Non-descending custom timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]
        # NOTE(review): the expected exception type was obfuscated away;
        # ValueError matches the message — confirm against the scheduler.
        with self.assertRaises(ValueError, msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps)

    def lowerCamelCase_(self):
        """Passing both ``num_inference_steps`` and ``timesteps`` must fail."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def lowerCamelCase_(self):
        """Timesteps at/above num_train_timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    def get_model_optimizer(self, resolution=32):
        """Build a freshly seeded tiny UNet and an SGD optimizer for it.

        The method name is grounded by the ``self.get_model_optimizer``
        call sites in the test below (the obfuscated def had lost it).
        """
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001)
        return model, optimizer

    @slow
    def lowerCamelCase_(self):
        """Training steps with DDPM vs. DDIM noise must stay numerically equal.

        Fix: every intermediate (schedulers, batches, predictions, loss)
        was bound to a throwaway local, and the final ``allclose`` checks
        compared undefined names.
        """
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # NOTE(review): the clip_sample flags were obfuscated away; True for
        # both keeps the two schedulers comparable — confirm upstream.
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="""linear""",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="""linear""",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 631 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _lowercase :
def __init__( self: Dict , UpperCamelCase__: str , UpperCamelCase__: Dict=3 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Tuple=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Any=False , UpperCamelCase__: Any=True , UpperCamelCase__: Tuple=99 , UpperCamelCase__: Optional[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Dict=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: str="gelu" , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: str=2 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Dict=None , ):
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : int = seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : int = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : str = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : int = num_choices
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Tuple = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCamelCase_ ( self: Tuple ):
        # Builds a small FalconConfig from the tester's hyper-parameters.
        # NOTE(review): `is_decoder=` and `new_decoder_architecture=` are fed the
        # placeholder name `UpperCamelCase__`, which is undefined here — the
        # intended values (presumably booleans) were lost in a rename; confirm
        # against the upstream test file.
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCamelCase__ , )
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] ):
        # Builds FalconModel from the config, runs it in eval mode (with and
        # without an attention mask) and asserts the last_hidden_state shape is
        # (batch, seq_len, hidden).
        # NOTE(review): the repeated parameter name is a SyntaxError and the
        # locals (`model`, `result`) were collapsed into placeholders — the
        # original distinct names must be restored for this to execute.
        lowerCamelCase__ : Any = FalconModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        lowerCamelCase__ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: Tuple , ):
        # Decoder-mode variant of the model check: runs FalconModel with
        # encoder_hidden_states / encoder_attention_mask (cross-attention
        # inputs), then without the encoder mask, then without any encoder
        # inputs, and asserts the output shape of the last call.
        # NOTE(review): duplicate parameter names (SyntaxError) and collapsed
        # locals as in the sibling methods — restore from upstream.
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Dict = FalconModel(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Union[str, Any] = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
        lowerCamelCase__ : List[Any] = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] , ):
        # Runs FalconForCausalLM with labels and asserts the logits shape is
        # (batch, seq_len, vocab_size).
        # NOTE(review): duplicate parameter names (SyntaxError) and collapsed
        # locals as in the sibling methods.
        lowerCamelCase__ : Any = FalconForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Optional[int] , ):
        # Verifies that generation with a KV cache (past_key_values) produces
        # the same hidden states as recomputing the full sequence from scratch:
        # run once with use_cache, extend the input by 3 tokens, then compare a
        # random hidden-state slice between the cached and uncached paths.
        # NOTE(review): duplicate parameter names (SyntaxError) and collapsed
        # locals as in the sibling methods.
        lowerCamelCase__ : List[Any] = True
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : str = FalconForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # first forward pass
        lowerCamelCase__ : List[str] = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
        lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowerCamelCase__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
        lowerCamelCase__ : Tuple = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
        lowerCamelCase__ : Dict = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
        # select random slice
        lowerCamelCase__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
(
lowerCamelCase__
) : List[str] = config_and_inputs
lowerCamelCase__ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    """Falcon test-suite wiring the model tester into the shared mixins.

    NOTE(review): identifiers in this class were machine-degraded — the mixin
    bases are all ``_lowercase`` (upstream: ModelTesterMixin,
    GenerationTesterMixin-style, PipelineTesterMixin), every class attribute is
    ``a`` (so only the last assignment survives; upstream names were
    ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping``, and two ``test_*`` flags) and every method is
    ``lowerCamelCase_`` (later defs shadow earlier ones).  The comments below
    record each body's intent so the names can be restored.
    """

    # All Falcon architectures covered by the common tests.
    a = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Generative subset used by generation tests.
    a = (FalconForCausalLM,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping for pipeline tests.
    a = (
        {
            """feature-extraction""": FalconModel,
            """text-classification""": FalconForSequenceClassification,
            """text-generation""": FalconForCausalLM,
            """question-answering""": FalconForQuestionAnswering,
            """token-classification""": FalconForTokenClassification,
            """zero-shot""": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # setUp: build the model tester and a ConfigTester.
        # NOTE(review): ``config_class=UpperCamelCase__`` is undefined at class
        # scope — upstream this is ``FalconConfig``.
        lowerCamelCase__ : Tuple = FalconModelTester(self )
        lowerCamelCase__ : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Standard config sanity checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: Optional[int] ):
        # Basic forward-pass/shape test for FalconModel.
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict ):
        # Same model check, toggling the alibi positional-bias variant.
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            lowerCamelCase__ : str = alibi
            self.model_tester.create_and_check_model(UpperCamelCase__ , *UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Sequence-classification head: logits shape (batch, num_labels).
        lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = 3
        lowerCamelCase__ : Optional[int] = input_dict["""input_ids"""]
        lowerCamelCase__ : Any = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase__ : Dict = FalconForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCamelCase_ ( self: Dict ):
        # Same head with problem_type="single_label_classification".
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : Optional[Any] = """single_label_classification"""
        lowerCamelCase__ : int = input_dict["""input_ids"""]
        lowerCamelCase__ : int = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase__ : str = FalconForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCamelCase_ ( self: List[Any] ):
        # Round-trips the KV cache through Falcon's RW <-> standard format
        # converters and checks ranks (3 vs 4 dims) and value equality.
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Tuple = input_dict["""input_ids"""]
        lowerCamelCase__ : Optional[int] = FalconForCausalLM(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , use_cache=UpperCamelCase__ )
        lowerCamelCase__ : str = input_ids.shape[0]
        lowerCamelCase__ : Dict = model._convert_to_rw_cache(result.past_key_values )
        lowerCamelCase__ : str = model._convert_cache_to_standard_format(UpperCamelCase__ , UpperCamelCase__ )
        for layer in range(len(UpperCamelCase__ ) ):
            for tensor_idx in range(2 ):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Same classification head with problem_type="multi_label_classification"
        # (float multi-hot labels).
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : List[Any] = 3
        lowerCamelCase__ : Optional[Any] = """multi_label_classification"""
        lowerCamelCase__ : List[str] = input_dict["""input_ids"""]
        lowerCamelCase__ : str = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase__ : Dict = FalconForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(UpperCamelCase__ , """use_cache""" ):
                return
            lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ ).to(UpperCamelCase__ )
            if "use_cache" not in inputs:
                lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ )
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            lowerCamelCase__ : Tuple = (
                getattr(UpperCamelCase__ , """decoder_layers""" , UpperCamelCase__ )
                or getattr(UpperCamelCase__ , """num_decoder_layers""" , UpperCamelCase__ )
                or config.num_hidden_layers
            )
            lowerCamelCase__ : Optional[int] = getattr(UpperCamelCase__ , """num_kv_heads""" , config.num_attention_heads )
            lowerCamelCase__ : Tuple = getattr(UpperCamelCase__ , """d_model""" , config.hidden_size )
            lowerCamelCase__ : Union[str, Any] = embed_dim // num_attention_heads
            lowerCamelCase__ : Any = outputs["""past_key_values"""]
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            lowerCamelCase__ : Tuple = inputs["""input_ids"""].shape
            for i in range(UpperCamelCase__ ):
                if config.new_decoder_architecture:
                    lowerCamelCase__ : Dict = config.num_attention_heads
                elif config.multi_query:
                    lowerCamelCase__ : List[str] = 1
                self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _lowercase ( unittest.TestCase ):
    """Slow integration tests running real / tiny-random Falcon checkpoints.

    NOTE(review): as elsewhere in this file, locals and the target device were
    collapsed into placeholder names (``lowerCamelCase__`` / ``UpperCamelCase__``,
    upstream ``torch_device``); the comments record each test's intent.
    """

    @slow
    def lowerCamelCase_ ( self: str ):
        # Greedy (do_sample=False) generation from falcon-rw-1b must reproduce
        # the pinned 19-token continuation exactly.
        lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
        lowerCamelCase__ : Any = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
        model.eval()
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : str = (
            """My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
        )
        lowerCamelCase__ : List[str] = model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=19 )
        lowerCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCamelCase__ )[0]
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: int ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = FalconForCausalLM.from_pretrained(UpperCamelCase__ )
            model.eval()
            model.to(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(UpperCamelCase__ )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=4 )
            model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=4 )
            model.generate(**UpperCamelCase__ , num_beams=2 , max_new_tokens=4 )

    @slow
    def lowerCamelCase_ ( self: str ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
                lowerCamelCase__ : str = FalconForCausalLM.from_pretrained(UpperCamelCase__ )
                model.eval()
                model.to(device=UpperCamelCase__ )
                lowerCamelCase__ : Optional[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(UpperCamelCase__ )
                # Test results are the same with and without cache
                lowerCamelCase__ : Union[str, Any] = model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=20 , use_cache=UpperCamelCase__ )
                lowerCamelCase__ : str = model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=20 , use_cache=UpperCamelCase__ )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate per-process turn-around times under Highest Response Ratio
    Next (HRRN), a non-preemptive scheduling policy.

    At each step the ready process with the highest response ratio
    ``(burst + waiting) / burst`` runs to completion.

    Fixes versus the previous revision: every parameter shared one name (a
    SyntaxError) and both helpers in this module were defined under the same
    identifier, so the names called by the ``__main__`` block
    (``calculate_turn_around_time`` / ``calculate_waiting_time``) did not
    exist.  Positional order of the parameters is unchanged.

    Note: ``arrival_time`` is sorted in place (original behaviour).

    :param process_name: process labels, parallel to the time lists
    :param arrival_time: arrival time of each process
    :param burst_time: CPU burst of each process (must be non-zero)
    :param no_of_process: number of processes
    :return: turn-around time per process, in arrival order
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # 0 = not yet executed, 1 = finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time (keep names/bursts aligned with the sorted order).
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Jump the clock forward if the earliest unfinished process has
        # not arrived yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        # Best response ratio seen so far and the index of that process.
        response_ratio = 0
        loc = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Derive waiting times from turn-around times: wait = turnaround - burst.

    ``process_name`` is unused but kept for call-site signature parity.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run of the HRRN scheduler on five processes.
    # Fixed: the previous revision bound every value to the same name `_A`,
    # so `process_name`, `turn_around_time`, `waiting_time`, ... used below
    # were all undefined (NameError).
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 631 | 0 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Checkpoint names and fixture texts used throughout the dataset tests below.
# Fixed: the previous revision bound all eight constants to the same name
# `_A`, while the tests reference them as BERT_BASE_CASED, PEGASUS_XSUM,
# ARTICLES, SUMMARIES, T5_TINY, BART_TINY, MBART_TINY and MARIAN_TINY.
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path, articles):
    """Write ``articles`` to ``path`` as newline-separated plain text.

    Fixed: the previous signature repeated one parameter name (SyntaxError)
    and wrote the raw list instead of the joined string.
    """
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    """Populate ``tmp_dir`` with train/val/test ``.source``/``.target``
    fixture files built from the module-level ARTICLES and SUMMARIES, and
    return ``tmp_dir``.

    Fixed: both helpers previously shared the name ``SCREAMING_SNAKE_CASE_``
    (the second shadowed the first) while call sites use ``_dump_articles``
    and ``make_test_data_dir(tmp_dir=...)``.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class _lowercase ( _lowercase ):
    """Tests for SeqaSeqDataset / LegacySeqaSeqDataset, packing and samplers.

    NOTE(review): identifiers were machine-degraded — the base class
    references an undefined ``_lowercase`` (upstream: ``TestCasePlus``), every
    method is ``lowerCamelCase_`` (later defs shadow earlier ones) and locals
    were collapsed into placeholders.  Comments record each body's intent.
    """

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Tuple ):
        # Checks that SeqaSeqDataset truncates sources/targets to the given
        # max lengths and (for mBART) places the language codes correctly.
        lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
        lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
        lowerCamelCase__ : int = 4
        lowerCamelCase__ : List[str] = 8
        assert max_len_target > max_src_len # Will be truncated
        assert max_len_source > max_src_len # Will be truncated
        lowerCamelCase__ : Union[str, Any] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
        lowerCamelCase__ : List[str] = SeqaSeqDataset(
            UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , )
        lowerCamelCase__ : Optional[int] = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            lowerCamelCase__ : Optional[Any] = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Tuple ):
        # Legacy dataset: sources capped at 20 tokens, targets truncated to 4.
        lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        lowerCamelCase__ : List[Any] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
        lowerCamelCase__ : Dict = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
        lowerCamelCase__ : Dict = 4
        lowerCamelCase__ : str = LegacySeqaSeqDataset(
            UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=20 , max_target_length=UpperCamelCase__ , )
        lowerCamelCase__ : Tuple = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target # Truncated
            assert max_len_target > trunc_target # Truncated
            break # No need to test every batch

    def lowerCamelCase_ ( self: Optional[Any] ):
        # pack_data_dir should merge short examples into fewer, longer lines
        # while preserving the total character count and the file names.
        lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
        lowerCamelCase__ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        lowerCamelCase__ : Union[str, Any] = tmp_dir.joinpath("""train.source""" ).open().readlines()
        lowerCamelCase__ : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(UpperCamelCase__ , UpperCamelCase__ , 128 , UpperCamelCase__ )
        lowerCamelCase__ : Any = {x.name for x in tmp_dir.iterdir()}
        lowerCamelCase__ : Optional[int] = {x.name for x in save_dir.iterdir()}
        lowerCamelCase__ : int = save_dir.joinpath("""train.source""" ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(UpperCamelCase__ ) < len(UpperCamelCase__ )
        assert len(UpperCamelCase__ ) == 1
        assert len(packed_examples[0] ) == sum(len(UpperCamelCase__ ) for x in orig_examples )
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
    def lowerCamelCase_ ( self: Tuple ):
        # Dynamic batch sampler: batches respect the token budget (within 10%)
        # and the required batch-size multiple, with no dropped examples.
        if not FAIRSEQ_AVAILABLE:
            return
        lowerCamelCase__ : Dict = self._get_dataset(max_len=64 )
        lowerCamelCase__ : Optional[int] = 64
        lowerCamelCase__ : Tuple = ds.make_dynamic_sampler(UpperCamelCase__ , required_batch_size_multiple=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = [len(UpperCamelCase__ ) for x in batch_sampler]
        assert len(set(UpperCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
        assert sum(UpperCamelCase__ ) == len(UpperCamelCase__ ) # no dropped or added examples
        lowerCamelCase__ : Optional[Any] = DataLoader(UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
        lowerCamelCase__ : Tuple = []
        lowerCamelCase__ : Any = []
        for batch in data_loader:
            lowerCamelCase__ : Optional[int] = batch["""input_ids"""].shape
            lowerCamelCase__ : Optional[Any] = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            lowerCamelCase__ : str = np.product(batch["""input_ids"""].shape )
            num_src_per_batch.append(UpperCamelCase__ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(UpperCamelCase__ )
        assert num_src_per_batch[0] == max(UpperCamelCase__ )
        if failures:
            raise AssertionError(F'''too many tokens in {len(UpperCamelCase__ )} batches''' )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Sortish sampler should reduce padding versus the default order while
        # keeping the number of batches identical.
        lowerCamelCase__ : Any = self._get_dataset(max_len=512 )
        lowerCamelCase__ : Union[str, Any] = 2
        lowerCamelCase__ : Tuple = ds.make_sortish_sampler(UpperCamelCase__ , shuffle=UpperCamelCase__ )
        lowerCamelCase__ : Any = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
        lowerCamelCase__ : int = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = tokenizer.pad_token_id
        def count_pad_tokens(UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int]="input_ids" ):
            # Counts pad tokens per batch for the given key across the loader.
            return [batch[k].eq(UpperCamelCase__ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) ) < sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) )
        assert sum(count_pad_tokens(UpperCamelCase__ ) ) < sum(count_pad_tokens(UpperCamelCase__ ) )
        assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict=1_000 , UpperCamelCase__: List[Any]=128 ):
        # Helper: returns (dataset, max_tokens, tokenizer), reading real WMT
        # data when USE_REAL_DATA is set, else the bundled test fixture.
        # NOTE(review): duplicate parameter names (SyntaxError) — upstream
        # these are (n_obs=1000, max_len=128).
        if os.getenv("""USE_REAL_DATA""" , UpperCamelCase__ ):
            lowerCamelCase__ : str = """examples/seq2seq/wmt_en_ro"""
            lowerCamelCase__ : Any = max_len * 2 * 64
            if not Path(UpperCamelCase__ ).joinpath("""train.len""" ).exists():
                save_len_file(UpperCamelCase__ , UpperCamelCase__ )
        else:
            lowerCamelCase__ : List[str] = """examples/seq2seq/test_data/wmt_en_ro"""
            lowerCamelCase__ : Optional[Any] = max_len * 4
            save_len_file(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = SeqaSeqDataset(
            UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , n_obs=UpperCamelCase__ , )
        return ds, max_tokens, tokenizer

    def lowerCamelCase_ ( self: Optional[int] ):
        # Distributed sortish sampler: the two ranks must see disjoint indices.
        lowerCamelCase__ : int = self._get_dataset()
        lowerCamelCase__ : Optional[Any] = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase__ ) )
        lowerCamelCase__ : str = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase__ ) )
        assert idsa.intersection(UpperCamelCase__ ) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
        # dataset_kwargs must carry src/tgt language for mBART and
        # add_prefix_space only for BART-style tokenizers.
        lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
        if tok_name == MBART_TINY:
            lowerCamelCase__ : Dict = SeqaSeqDataset(
                UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
            lowerCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            lowerCamelCase__ : Dict = SeqaSeqDataset(
                UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
            lowerCamelCase__ : Optional[int] = train_dataset.dataset_kwargs
        assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        # NOTE(review): the trailing "| 706 |" below looks like paste damage.
        assert len(UpperCamelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase__ ) == 0 | 706 |
'''Conditional public exports for the VQ-Diffusion pipeline subpackage.'''
from ...utils import is_torch_available, is_transformers_available
# The pipeline needs both torch and transformers; expose it only when both
# backends are importable so importing this package never hard-fails.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 0 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _lowercase ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving (VP) SDE scheduler (ScoreSdeVp-style).

    Fixes versus the previous revision:
    - the class inherited from the undefined name ``_lowercase`` twice, while
      ``SchedulerMixin`` and ``ConfigMixin`` were imported but unused — the
      mixins are now the bases;
    - both methods were named ``lowerCamelCase_`` (the second shadowed the
      first) and repeated a single parameter name, a SyntaxError — restored
      as ``set_timesteps`` / ``step_pred`` with distinct parameters;
    - the ``a = 1`` class attribute is restored as ``order = 1``, which is the
      attribute diffusers pipelines read.
    """

    order = 1

    @register_to_config
    def __init__( self, num_train_timesteps: int = 2_000, beta_min: float = 0.1, beta_max: float = 20, sampling_eps: float = 1e-3 ):
        # @register_to_config stores the constructor arguments on self.config;
        # only the mutable sampling state is initialised here.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self, num_inference_steps: int, device: Union[str, torch.device] = None ):
        """Create the (descending) continuous timestep schedule on *device*."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self, score, x, t, generator=None ):
        """Run one reverse-SDE predictor step.

        ``score`` is the model output at sample ``x`` and timestep ``t``;
        returns ``(x, x_mean)`` where ``x`` includes the stochastic noise term
        and ``x_mean`` is the noise-free update.  Raises if ``set_timesteps``
        has not been called.
        """
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        # Broadcast std over the trailing sample dimensions.
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute the reverse-SDE drift/diffusion for a negative time step dt
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self: Optional[int] ):
        # Length of the scheduler == configured number of training timesteps.
        return self.config.num_train_timesteps
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Builds a tiny ViTMAE config plus dummy inputs for the tests below.

    NOTE(review): identifiers in this file look machine-mangled — every local
    is ``lowerCamelCase__`` and every parameter is ``UpperCamelCase__``
    (duplicate parameter names are a SyntaxError in Python), so references
    such as ``parent``, ``pixel_values`` or ``config_and_inputs`` are
    undefined as written. Also all helper methods share the name
    ``lowerCamelCase_`` and shadow each other. Verify against the original
    test file before relying on this code.
    """
    def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
        # Store every hyper-parameter of the tiny test model.
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : Optional[Any] = patch_size
        lowerCamelCase__ : Any = num_channels
        lowerCamelCase__ : Any = is_training
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : str = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : List[str] = mask_ratio
        lowerCamelCase__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    # Build (config, pixel_values, optional labels) for one test case.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase__ : Any = self.get_config()
        return config, pixel_values, labels
    # Tiny ViTMAE configuration mirroring the constructor hyper-parameters.
    def lowerCamelCase_ ( self: str ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    # Forward pass through the bare ViTMAEModel plus an output-shape check.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ):
        lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Forward pass through ViTMAEForPreTraining, including a greyscale variant.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowerCamelCase__ : Dict = 1
        lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    # Repackage prepared inputs as the kwargs dict used by the common tests.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Runs the common model/pipeline test-suite against tiny ViTMAE models.

    NOTE(review): names in this file look machine-mangled — the flag/class
    attributes are all ``a`` (each shadows the previous) and every test
    method is ``lowerCamelCase_`` (only the last definition survives at
    runtime). Verify against the original test file.
    """
    a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    a = False
    a = False
    a = False
    a = False
    # Create the model tester and the config tester used by the suite.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Dict ):
        pass
    # Input embeddings must be an nn.Module; output embeddings None or Linear.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
    # The forward signature must start with `pixel_values`.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
            lowerCamelCase__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Any = [*signature.parameters.keys()]
            lowerCamelCase__ : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
    # Override: feed one fixed noise mask so PT/TF cross-checks are comparable.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
        # make masks reproducible
        np.random.seed(2 )
        lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowerCamelCase__ : Tuple = pt_noise
        super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # Save/load round-trip with the same seeded mask: outputs must match.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
            lowerCamelCase__ : List[str] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
                model.to(UpperCamelCase__ )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                # Make sure we don't have nans
                lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
                lowerCamelCase__ : Tuple = 0
                lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(UpperCamelCase__ , 1e-5 )
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: Any ):
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Optional[int] ):
        pass
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """Return the COCO cats fixture image used by the slow integration tests."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: one ViTMAE pre-training forward pass on a fixture
    image, with the noise mask seeded so results are reproducible."""
    @cached_property
    def lowerCamelCase_ ( self: List[str] ):
        # Image processor for facebook/vit-mae-base (None when vision deps missing).
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self: Tuple ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.default_image_processor
        lowerCamelCase__ : List[str] = prepare_img()
        lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        lowerCamelCase__ : List[str] = ViTMAEConfig()
        lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) )
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : str = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
| 631 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase ( _lowercase ):
    """Builds a tiny DeBERTa config plus dummy inputs for the tests below.

    NOTE(review): identifiers look machine-mangled — duplicate
    ``UpperCamelCase__`` parameters are a SyntaxError, reused
    ``lowerCamelCase__`` locals leave later references (``input_ids``,
    ``config_and_inputs``, ...) undefined, and all helper methods share the
    name ``lowerCamelCase_``. Verify against the original test file.
    """
    def __init__( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int]=13 , UpperCamelCase__: Dict=7 , UpperCamelCase__: List[str]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Dict=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: int=99 , UpperCamelCase__: Union[str, Any]=32 , UpperCamelCase__: List[str]=5 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Any=False , UpperCamelCase__: Dict=True , UpperCamelCase__: List[Any]="None" , UpperCamelCase__: Dict=3 , UpperCamelCase__: int=4 , UpperCamelCase__: Optional[int]=None , ):
        # Store every hyper-parameter of the tiny test model.
        lowerCamelCase__ : List[str] = parent
        lowerCamelCase__ : Tuple = batch_size
        lowerCamelCase__ : Tuple = seq_length
        lowerCamelCase__ : Union[str, Any] = is_training
        lowerCamelCase__ : Tuple = use_input_mask
        lowerCamelCase__ : Dict = use_token_type_ids
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Dict = vocab_size
        lowerCamelCase__ : Optional[int] = hidden_size
        lowerCamelCase__ : str = num_hidden_layers
        lowerCamelCase__ : str = num_attention_heads
        lowerCamelCase__ : Optional[int] = intermediate_size
        lowerCamelCase__ : Tuple = hidden_act
        lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
        lowerCamelCase__ : int = attention_probs_dropout_prob
        lowerCamelCase__ : str = max_position_embeddings
        lowerCamelCase__ : int = type_vocab_size
        lowerCamelCase__ : Optional[int] = type_sequence_label_size
        lowerCamelCase__ : List[Any] = initializer_range
        lowerCamelCase__ : List[Any] = num_labels
        lowerCamelCase__ : str = num_choices
        lowerCamelCase__ : Optional[Any] = relative_attention
        lowerCamelCase__ : int = position_biased_input
        lowerCamelCase__ : int = pos_att_type
        lowerCamelCase__ : int = scope
    # Build ids, masks and label tensors plus a config.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : Optional[Any] = None
        if self.use_input_mask:
            lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        lowerCamelCase__ : int = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : List[Any] = None
        lowerCamelCase__ : int = None
        if self.use_labels:
            lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : List[Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Tiny DeBERTa configuration mirroring the constructor hyper-parameters.
    def lowerCamelCase_ ( self: Optional[int] ):
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    # Pipeline variant of the config with a reduced vocabulary (300).
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = self.get_config()
        lowerCamelCase__ : str = 300
        return config
    # Loss must be a scalar (empty shape).
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    # Bare DebertaModel forward with/without masks; shape check only.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: int ):
        lowerCamelCase__ : str = DebertaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
        lowerCamelCase__ : List[str] = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
        lowerCamelCase__ : int = model(UpperCamelCase__ )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    # Masked-LM head: logits over the vocabulary.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: Dict ):
        lowerCamelCase__ : int = DebertaForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Sequence-classification head: one logit per label.
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
        lowerCamelCase__ : str = self.num_labels
        lowerCamelCase__ : str = DebertaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(UpperCamelCase__ )
    # Token-classification head: per-token label logits.
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
        lowerCamelCase__ : int = self.num_labels
        lowerCamelCase__ : Tuple = DebertaForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Question-answering head: start/end logits per token.
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        lowerCamelCase__ : List[str] = DebertaForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Repackage prepared inputs as the kwargs dict used by the common tests.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        # NOTE(review): the tuple unpacking was mangled to a single name, so
        # `input_ids` etc. below are undefined as written.
        (
            lowerCamelCase__
        ) : Optional[int] = config_and_inputs
        lowerCamelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Runs the common model/pipeline test-suite against tiny DeBERTa models.

    NOTE(review): the class attributes are all mangled to ``a`` (each shadows
    the previous) and every test method is ``lowerCamelCase_`` (only the last
    definition survives at runtime). Verify against the original test file.
    """
    a = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a = True
    a = False
    a = False
    a = False
    a = False
    # Create the model tester and the config tester used by the suite.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ : Tuple = DebertaModelTester(self )
        lowerCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    def lowerCamelCase_ ( self: Optional[int] ):
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase__ )
    @slow
    def lowerCamelCase_ ( self: Any ):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : int = DebertaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
    """Slow integration test: one DeBERTa-base forward pass compared against a
    pinned logits slice."""
    @unittest.skip(reason="""Model not available yet""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : str = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
        lowerCamelCase__ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        lowerCamelCase__ : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCamelCase__ : List[str] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        lowerCamelCase__ : Any = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _lowercase ):
    """Read-only fsspec filesystem exposing one compressed file as a single
    uncompressed file.

    NOTE(review): the class attributes are all mangled to ``a`` (originally
    the root marker, protocol, compression codec and extension — each shadows
    the previous), and several ``self.*`` assignments were mangled into plain
    locals (e.g. the dir-cache entry below), so behavior differs from the
    original. Verify against the original file.
    """
    a = """"""
    a = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    a = None # compression type in fsspec. ex: "gzip"
    a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
        super().__init__(self , **UpperCamelCase__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase__ : List[Any] = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # Uncompressed name = compressed basename with its extension stripped.
        lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
        lowerCamelCase__ : Union[str, Any] = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        lowerCamelCase__ : Tuple = None
    @classmethod
    def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )
    # Lazily populate the single-entry directory cache.
    def lowerCamelCase_ ( self: Tuple ):
        if self.dir_cache is None:
            lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            # NOTE(review): originally assigned to ``self.dir_cache``; mangled
            # to a dead local here.
            lowerCamelCase__ : int = {f["""name"""]: f}
    # Return the full decompressed content.
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        return self.file.open().read()
    # Open the decompressed stream; only binary read mode is supported.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
        lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    # Bzip2 variant: fsspec protocol name, compression codec, extension to strip.
    # NOTE(review): all three attributes are mangled to ``a`` and shadow each other.
    a = """bz2"""
    a = """bz2"""
    a = """.bz2"""
class _lowercase ( _lowercase ):
    # Gzip variant: fsspec protocol name, compression codec, extension to strip.
    # NOTE(review): all three attributes are mangled to ``a`` and shadow each other.
    a = """gzip"""
    a = """gzip"""
    a = """.gz"""
class _lowercase ( _lowercase ):
    # LZ4 variant: fsspec protocol name, compression codec, extension to strip.
    # NOTE(review): all three attributes are mangled to ``a`` and shadow each other.
    a = """lz4"""
    a = """lz4"""
    a = """.lz4"""
class _lowercase ( _lowercase ):
    # XZ variant: fsspec protocol name, compression codec, extension to strip.
    # NOTE(review): all three attributes are mangled to ``a`` and shadow each other.
    a = """xz"""
    a = """xz"""
    a = """.xz"""
class _lowercase ( _lowercase ):
    # Zstandard variant; also wraps the opened file's __enter__ (see below).
    # NOTE(review): the three attributes are mangled to ``a`` and shadow each other.
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""
    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase__ : Tuple = self.file.__enter__
        # Proxy that forwards everything to the wrapped file object but
        # returns itself from __enter__ (works around the read-only `close`).
        class _lowercase :
            def __init__( self: Optional[int] , UpperCamelCase__: Any ):
                lowerCamelCase__ : Optional[int] = file_
            def __enter__( self: List[Any] ):
                self._file.__enter__()
                return self
            def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
            def __iter__( self: Any ):
                return iter(self._file )
            def lowerCamelCase_ ( self: List[Any] ):
                return next(self._file )
            def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
                return getattr(self._file , UpperCamelCase__ )
        def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )
        # NOTE(review): originally ``self.file.__enter__ = fixed_enter``; the
        # mangled plain assignment below no longer installs the patch.
        lowerCamelCase__ : Optional[Any] = fixed_enter
| 631 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 1000000 ) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit (Project Euler 72 style).

    Args:
        UpperCamelCase: upper bound ``limit`` (inclusive). Defaults to 1_000_000.

    Returns:
        ``int(sum(phi(n) for n in range(2, limit + 1)))``.

    Fixes vs. the previous revision: the sieve strike-out and the
    multiple-marking loops used the limit instead of the prime ``p`` as the
    ``range`` step, and the body referenced an undefined name ``limit``.
    """
    limit = UpperCamelCase
    # Sieve of Eratosthenes over the odd numbers; 2 is added explicitly.
    primes = set(range(3, limit + 1, 2))
    primes.add(2)
    for p in range(3, int(limit**0.5) + 1, 2):
        if p not in primes:
            continue
        # Strike out multiples of p, stepping by p (not by the limit).
        primes.difference_update(range(p * p, limit + 1, p))
    # phi[n] starts at n and is scaled by (1 - 1/p) for each prime divisor p.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    # The solver in this revision is named ``SCREAMING_SNAKE_CASE_``; the old
    # ``solution`` reference raised NameError when run as a script.
    print(F'{SCREAMING_SNAKE_CASE_() = }')
| 709 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> argparse.Namespace:
    """Load an original MobileViTv2 YAML config file as a flat ``argparse.Namespace``.

    Nested mappings are flattened into dot-separated attribute names, e.g.
    ``{"model": {"name": "x"}}`` becomes attribute ``model.name``.

    Args:
        UpperCamelCase: path to the YAML config file.

    Fixes vs. the previous revision: the nested helper declared three
    parameters all named ``UpperCamelCase`` (a SyntaxError) and its body
    referenced names the mangling had destroyed; the helper is reconstructed
    from its own body. (Return annotation corrected from ``str``.)
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(UpperCamelCase, """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase, str(exc)))
    return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    """Build a MobileViTV2 HF config for a given task name and original config.

    NOTE(review): this function is machine-mangled — the two parameters share
    one name (a SyntaxError) and locals were collapsed to ``lowerCamelCase__``,
    so references like ``task_name``, ``orig_config`` and ``idalabel`` are
    undefined as written. Compare against the original conversion script.
    """
    lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig()
    lowerCamelCase__ : str = False
    # dataset: pick label count, image size and id2label file per task family.
    if task_name.startswith("""imagenet1k_""" ):
        lowerCamelCase__ : Optional[Any] = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ : int = 384
        else:
            lowerCamelCase__ : Optional[int] = 256
        lowerCamelCase__ : str = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        lowerCamelCase__ : Tuple = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ : str = 384
        else:
            lowerCamelCase__ : Any = 256
        lowerCamelCase__ : int = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        lowerCamelCase__ : Dict = 151
        lowerCamelCase__ : str = 512
        lowerCamelCase__ : List[Any] = """ade20k-id2label.json"""
        lowerCamelCase__ : Union[str, Any] = True
    elif task_name.startswith("""voc_""" ):
        lowerCamelCase__ : Tuple = 21
        lowerCamelCase__ : Optional[int] = 512
        lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json"""
        lowerCamelCase__ : Tuple = True
    # orig_config: sanity-check and copy fields from the original YAML config.
    lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase )
    assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label: download the label mapping and attach it to the config.
    lowerCamelCase__ : Tuple = """huggingface/label-files"""
    lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
    # NOTE(review): ``int(UpperCamelCase)`` below was presumably ``int(k)``
    # before mangling — as written the comprehension ignores the key.
    lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
    lowerCamelCase__ : int = idalabel
    lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCamelCase__ : List[Any] = dct.pop(UpperCamelCase )
lowerCamelCase__ : Dict = val
def SCREAMING_SNAKE_CASE_ (state_dict, base_model=False) -> list:
    """Build the (old_key, new_key) pairs that map original MobileViTV2
    checkpoint keys onto the HuggingFace model's parameter names.

    Args:
        state_dict: original checkpoint state dict (read only; not modified).
        base_model: if True, target the bare backbone and omit the
            ``"mobilevitv2."`` prefix on the new keys.

    Returns:
        List of ``(source_key, destination_key)`` tuples, one per key in
        ``state_dict``; keys that need no renaming map to themselves.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the original "encoder." wrapper. All membership tests below are
        # done against the untouched key ``k`` while the replacements accumulate
        # in ``k_new`` — preserving the original script's exact behavior.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            # each stage has a different number of transformer layers
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
                # the module right after the last transformer block is the closing layernorm
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def SCREAMING_SNAKE_CASE_ (state_dict) -> None:
    """Drop the auxiliary segmentation-head weights from ``state_dict`` in place.

    Keys starting with ``"seg_head.aux_head."`` have no counterpart in the
    HuggingFace model, so they are removed before the rename/load step.

    Args:
        state_dict: checkpoint state dict — modified in place.
    """
    # collect first, then pop: never mutate a dict while iterating its keys
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        # default of None makes pop a no-op if the key is somehow already gone
        state_dict.pop(k, None)
def SCREAMING_SNAKE_CASE_ ():
    """Download and return the standard COCO cats test image (PIL.Image).

    Used only to sanity-check the converted model's forward pass; requires
    network access.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    # stream=True lets PIL read straight from the response's raw file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path) -> None:
    """Convert an original MobileViTV2 checkpoint to HuggingFace format and save it.

    Args:
        task_name: which pretrained variant is being converted (see CLI choices).
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original model config file.
        pytorch_dump_folder_path: output directory for the HF model + image processor.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        # id2label is the standard HF config attribute mapping class index -> name
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
              imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
              ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : List[str] =logging.get_logger(__name__)
_A : int ={
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
a = """roberta"""
def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any]=50_265 , UpperCamelCase__: Any=768 , UpperCamelCase__: Optional[int]=12 , UpperCamelCase__: Any=12 , UpperCamelCase__: Optional[Any]=3_072 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: int=0.1 , UpperCamelCase__: str=0.1 , UpperCamelCase__: Optional[int]=512 , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[Any]=1e-12 , UpperCamelCase__: Union[str, Any]=1 , UpperCamelCase__: str=0 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]="absolute" , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Union[str, Any] , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : Tuple = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : Tuple = type_vocab_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : str = position_embedding_type
lowerCamelCase__ : Union[str, Any] = use_cache
lowerCamelCase__ : Any = classifier_dropout
class _lowercase ( _lowercase ):
@property
def lowerCamelCase_ ( self: Tuple ):
if self.task == "multiple-choice":
lowerCamelCase__ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 710 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
class _lowercase :
def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Any = conv_kernel_size
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[Any] = scope
lowerCamelCase__ : Tuple = width_multiplier
lowerCamelCase__ : List[Any] = ffn_dropout
lowerCamelCase__ : Any = attn_dropout
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Tuple ):
pass
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs.hidden_states
lowerCamelCase__ : List[Any] = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ : int = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
lowerCamelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : int = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
lowerCamelCase__ : str = outputs.logits
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Any = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
lowerCamelCase__ : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
lowerCamelCase__ : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> float:
lowerCamelCase__ : List[Any] = 0
while len(UpperCamelCase ) > 1:
lowerCamelCase__ : Union[str, Any] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
lowerCamelCase__ : Tuple = files.index(min(UpperCamelCase ) )
temp += files[min_index]
files.pop(UpperCamelCase )
files.append(UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Optional[Any] =logging.get_logger(__name__)
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["""input_ids""", """attention_mask"""]
def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
lowerCamelCase__ : Dict = add_prefix_space
lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
lowerCamelCase__ : Dict = add_prefix_space
def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
lowerCamelCase__ : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
lowerCamelCase__ : int = input_ids[-self.model_max_length :]
return input_ids
| 631 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Any = image.size
lowerCamelCase__ : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowerCamelCase__ : List[Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
lowerCamelCase__ : Any = np.array(UpperCamelCase ).astype(np.floataa ) / 255.0
lowerCamelCase__ : int = image[None].transpose(0 , 3 , 1 , 2 )
lowerCamelCase__ : Optional[int] = torch.from_numpy(UpperCamelCase )
return 2.0 * image - 1.0
class _lowercase ( _lowercase ):
def __init__( self: int , UpperCamelCase__: VQModel , UpperCamelCase__: UNetaDModel , UpperCamelCase__: Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self: Union[str, Any] , UpperCamelCase__: Union[torch.Tensor, PIL.Image.Image] = None , UpperCamelCase__: Optional[int] = 1 , UpperCamelCase__: Optional[int] = 100 , UpperCamelCase__: Optional[float] = 0.0 , UpperCamelCase__: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__: Optional[str] = "pil" , UpperCamelCase__: bool = True , ):
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
lowerCamelCase__ : Any = 1
elif isinstance(UpperCamelCase__ , torch.Tensor ):
lowerCamelCase__ : Tuple = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCamelCase__ )}''' )
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
lowerCamelCase__ : List[str] = preprocess(UpperCamelCase__ )
lowerCamelCase__ : str = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCamelCase__ : int = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCamelCase__ : List[Any] = next(self.unet.parameters() ).dtype
lowerCamelCase__ : Union[str, Any] = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = image.to(device=self.device , dtype=UpperCamelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device )
lowerCamelCase__ : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__ : Optional[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ : Tuple = {}
if accepts_eta:
lowerCamelCase__ : Dict = eta
for t in self.progress_bar(UpperCamelCase__ ):
# concat latents and low resolution image in the channel dimension.
lowerCamelCase__ : int = torch.cat([latents, image] , dim=1 )
lowerCamelCase__ : Union[str, Any] = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
lowerCamelCase__ : Optional[Any] = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ : Dict = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# decode the image latents with the VQVAE
lowerCamelCase__ : List[str] = self.vqvae.decode(UpperCamelCase__ ).sample
lowerCamelCase__ : List[Any] = torch.clamp(UpperCamelCase__ , -1.0 , 1.0 )
lowerCamelCase__ : List[Any] = image / 2 + 0.5
lowerCamelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase__ : Dict = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the GIT model subpackage.
# NOTE(review): the original bound the import-structure dict, the torch-only
# symbol list and the _LazyModule instance all to the same throwaway name
# (each assignment clobbering the previous one) and then passed an undefined
# ``_import_structure`` to _LazyModule; the standard transformers lazy-module
# pattern is restored below.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    # The modeling symbols are only importable when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # ...while at runtime the module is replaced by a lazy loader that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
# Junction temperature in kelvin.  NOTE(review): the original assigned this
# constant to a throwaway name while the function below reads ``T``.
T = 300  # TEMPERATURE (unit = K)


def SCREAMING_SNAKE_CASE_(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
    temperature: float = T,
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    Implements  V_bi = (k*T / q) * ln(N_d * N_a / n_i**2).

    Args:
        donor_conc: donor concentration N_d (must be positive and exceed
            ``intrinsic_conc``).
        acceptor_conc: acceptor concentration N_a (same constraints).
        intrinsic_conc: intrinsic carrier concentration n_i (must be positive).
        temperature: junction temperature in kelvin; defaults to the module
            constant ``T`` (generalized from the original hard-coded constant).

    Raises:
        ValueError: if any concentration violates the constraints above.

    NOTE(review): the original signature gave all three parameters the same
    name (a SyntaxError); names are restored from the references in the body.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * temperature
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Module-level fixtures for the Marian tokenizer tests.
# NOTE(review): all five constants below are bound to the same name ``_A``, so
# each assignment clobbers the previous one; only the framework string
# survives.  The class below expects distinct names (a sample SPM path, a mock
# tokenizer config, a language code, ``ORG_NAME`` and a framework flag) —
# confirm against the upstream test file before relying on this module.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
_A : int ='''>>zh<<'''
_A : Dict ='''Helsinki-NLP/'''
# Pick a tensor framework string based on what is installed.
if is_torch_available():
    _A : List[Any] ='''pt'''
elif is_tf_available():
    _A : Optional[int] ='''tf'''
else:
    _A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tokenizer test-suite for ``MarianTokenizer``.

    NOTE(review): this class is broken as written by the renaming pass:
    (1) its first base names the class's own identifier, which is not bound
    yet when the ``class`` statement executes (NameError) — the intended base
    is presumably the ``TokenizerTesterMixin`` imported above;
    (2) the three class attributes below all share the name ``a``, so only the
    last assignment survives;
    (3) every method is named ``lowerCamelCase_``, so each definition shadows
    the previous one and only the final method remains on the class;
    (4) several bodies read ``UpperCamelCase__`` where no such name is bound.
    All claims below about intent are inferred from the upstream test — confirm.
    """

    a = MarianTokenizer
    a = False
    a = True
    # setUp: writes a toy vocab + SPM fixture under self.tmpdirname and
    # round-trips a tokenizer through save_pretrained.
    # NOTE(review): ``UpperCamelCase__`` and ``save_dir``/``tokenizer`` reads
    # do not match the ``lowerCamelCase__`` bindings above them — unbound names.
    def lowerCamelCase_ ( self: List[str] ):
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    # Factory used by the mixin: load a tokenizer from the fixture directory.
    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    # Input/output text pair used by the mixin's round-trip checks.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        return (
            "This is a test",
            "This is a test",
        )
    # Token <-> id conversion for the EOS token (id 0 in the toy vocab).
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
    # Vocab ordering/size sanity checks.  NOTE(review): ``vocab_keys`` is
    # never bound (the list above is assigned to ``lowerCamelCase__``).
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )
    def lowerCamelCase_ ( self: int ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    # Integration-style check against the published en-de checkpoint, plus a
    # save/reload round trip.  NOTE(review): ``ORG_NAME``, ``en_de_tokenizer``
    # and ``batch`` are unbound as written.
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )
    # Truncation to the 512-token model max length.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    # Padding to the longest sequence in the batch.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    # Full integration test against a pinned revision of the en-de checkpoint.
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    # Separate source/target vocab handling, including text_target encoding
    # and decode with skip_special_tokens.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( _lowercase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): the first base names this class's own identifier, which is
    # unbound when the class statement executes (NameError); the intended base
    # is presumably the ``TokenizerTesterMixin`` imported above — confirm.
    # The two ``a`` attributes shadow each other, and both test methods below
    # named ``lowerCamelCase_`` likewise shadow earlier definitions.
    a = XLMTokenizer
    a = False
    # setUp: writes a toy BPE vocab/merges fixture into self.tmpdirname.
    # NOTE(review): ``UpperCamelCase__`` is unbound here — the vocab/merges
    # lists above are assigned to ``lowerCamelCase__``; likewise
    # ``self.vocab_file``/``self.merges_file`` are never actually set.
    def lowerCamelCase_ ( self: Optional[Any] ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase__ : Union[str, Any] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        lowerCamelCase__ : Any = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(UpperCamelCase__ ) )
    # Input/output text pair for the mixin's round-trip checks.
    # NOTE(review): ``input_text``/``output_text`` are unbound as written.
    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Optional[int] ):
        lowerCamelCase__ : Tuple = """lower newer"""
        lowerCamelCase__ : List[Any] = """lower newer"""
        return input_text, output_text
    # BPE tokenization of "lower" and token->id conversion on the toy vocab.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ : Union[str, Any] = XLMTokenizer(self.vocab_file , self.merges_file )
        lowerCamelCase__ : str = """lower"""
        lowerCamelCase__ : int = ["""low""", """er</w>"""]
        lowerCamelCase__ : str = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokens + ["""<unk>"""]
        lowerCamelCase__ : str = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
    # Special-token layout ([0]=<s>, [1]=</s>) for single and paired sequences
    # using the published xlm-mlm-en-2048 checkpoint.
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : List[str] = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
        lowerCamelCase__ : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for RWKV models (vocabulary, width/depth and RWKV-specific knobs).

    NOTE(review): the original declared the class as inheriting from its own,
    not-yet-bound name (NameError at import time) and gave every ``__init__``
    parameter the identical identifier (a SyntaxError).  The base class is
    restored to the ``PretrainedConfig`` imported at the top of this file and
    the parameter names are reconstructed from the attribute assignments in
    the body — confirm against the upstream RWKV config.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    # Backward-compatible binding of the original (clobbered) attribute name
    # ``a``, whose final value was this mapping.
    a = attribute_map

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """Store the hyper-parameters; ``None`` sizes fall back to width-derived defaults."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Attention width defaults to the model width when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        # Feed-forward width defaults to four times the model width.
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 0 |
import math
def SCREAMING_SNAKE_CASE_(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power P = S * pf.

    Args:
        apparent_power: apparent power S (e.g. in volt-amperes).
        power_factor: power factor in [-1, 1].

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].

    NOTE(review): the original signature named both parameters identically
    (a SyntaxError) while the body read the unbound names ``apparent_power``
    and ``power_factor``; parameter names are restored from those references.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def SCREAMING_SNAKE_CASE_(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power Q = S * sqrt(1 - pf**2).

    Args:
        apparent_power: apparent power S (e.g. in volt-amperes).
        power_factor: power factor in [-1, 1].

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].

    NOTE(review): the original signature named both parameters identically
    (a SyntaxError) while the body read the unbound names ``apparent_power``
    and ``power_factor``; parameter names are restored from those references.
    This definition intentionally shadows the real-power function above, as
    in the original module (both shared one name).
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str =logging.get_logger(__name__)
_A : int ={
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for RoCBert: BERT-style sizes plus pronunciation/shape embeddings.

    NOTE(review): the original declared the class as inheriting from its own,
    not-yet-bound name (NameError at import time) and gave every ``__init__``
    parameter the identical identifier (a SyntaxError).  The base class is
    restored to the ``PretrainedConfig`` imported at the top of this file and
    the parameter names are reconstructed from the attribute assignments in
    the body — confirm against the upstream RoCBert config.
    """

    model_type = "roc_bert"
    # Backward-compatible binding of the original attribute name ``a``.
    a = model_type

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Store all hyper-parameters and forward ``pad_token_id`` to the base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: optional pronunciation/shape side embeddings and
        # whether they are concatenated onto the word embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import scaffolding for the TrOCR model subpackage.
# NOTE(review): the original bound the import-structure dict and the
# torch-only symbol list to the same throwaway name (the list clobbered the
# dict), passed an undefined ``_import_structure`` to _LazyModule, and never
# installed the lazy module in ``sys.modules``; the standard transformers
# pattern is restored below.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    # The modeling symbols are only importable when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # ...while at runtime the module is replaced by a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(point_a: tuple[float, float], point_b: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment joining *point_a* and *point_b*.

    NOTE(review): the original collapsed both endpoints into a single
    parameter name, so it averaged one point with itself; both functions were
    also named identically while the recursion referenced the undefined names
    ``get_mid`` / ``triangle``.  Names are restored so the fractal recurses.
    """
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2


def triangle(
    vertex_1: tuple[float, float],
    vertex_2: tuple[float, float],
    vertex_3: tuple[float, float],
    depth: int,
) -> None:
    """Draw a Sierpinski triangle of the given recursion *depth*.

    Uses the module-level turtle ``my_pen`` (created under the ``__main__``
    guard) for all drawing; raises NameError if called before it exists.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertex_1[0], vertex_1[1])
    my_pen.down()
    my_pen.goto(vertex_2[0], vertex_2[1])
    my_pen.goto(vertex_3[0], vertex_3[1])
    my_pen.goto(vertex_1[0], vertex_1[1])

    if depth == 0:
        return

    # Recurse into the three corner sub-triangles.
    triangle(vertex_1, get_mid(vertex_1, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
    triangle(vertex_2, get_mid(vertex_1, vertex_2), get_mid(vertex_2, vertex_3), depth - 1)
    triangle(vertex_3, get_mid(vertex_3, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)


# Preserve the original module-level name (its last binding was the drawing
# function) for backward compatibility.
SCREAMING_SNAKE_CASE_ = triangle
# Script entry point: expects one CLI argument (the recursion depth) and draws
# the fractal with a red turtle pen.
# NOTE(review): the turtle instance is assigned to the throwaway name ``_A``
# while every subsequent line reads ``my_pen`` — unbound as written; the
# assignment was presumably meant to bind ``my_pen``.
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    _A : Any =turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    _A : Dict =[(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that training steps are reproducible across DDPM and DDIM schedulers.

    NOTE(review): broken by the renaming pass: both methods are named
    ``lowerCamelCase_`` (the second shadows the first), the factory is later
    called as ``self.get_model_optimizer`` which no longer exists, tuple
    results are bound to a single name while the bodies read ``model`` /
    ``optimizer``, and the final ``allclose`` asserts compare the unbound name
    ``UpperCamelCase__`` with itself.  Intent inferred from upstream — confirm.
    """

    # Factory: a small UNet2D model plus an SGD optimizer, seeded for determinism.
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ):
        set_seed(0 )
        lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
    # Trains four steps with a DDPM scheduler, then repeats with DDIM on the
    # same seeded batches, and asserts the resulting samples/losses match.
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Optional[Any] = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        lowerCamelCase__ : List[Any] = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        lowerCamelCase__ : Any = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    """Model-tester helper that builds TFConvBert configs/inputs and runs shape checks.

    NOTE(review): broken by the renaming pass: every multi-parameter method
    signature (including ``__init__``) repeats the identical parameter name
    ``UpperCamelCase__`` (a SyntaxError — duplicate argument); all check
    methods share the name ``lowerCamelCase_`` so each shadows the previous;
    results bound to ``lowerCamelCase__`` are then read through unbound names
    (``result``, ``model``, ``config_and_inputs`` …); and the multi-line
    unpack near the bottom carries an annotation on a tuple target, which is
    also a SyntaxError.  Intent inferred from the upstream ConvBert tester —
    confirm before fixing.
    """

    # __init__: ignores its (broken) parameters and hard-codes a small model
    # configuration suitable for fast tests.
    def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ):
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Union[str, Any] = 13
        lowerCamelCase__ : Any = 7
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[Any] = True
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : List[str] = True
        lowerCamelCase__ : str = 99
        lowerCamelCase__ : Dict = 384
        lowerCamelCase__ : Optional[Any] = 2
        lowerCamelCase__ : Optional[int] = 4
        lowerCamelCase__ : Optional[Any] = 37
        lowerCamelCase__ : Union[str, Any] = """gelu"""
        lowerCamelCase__ : int = 0.1
        lowerCamelCase__ : Optional[Any] = 0.1
        lowerCamelCase__ : List[Any] = 512
        lowerCamelCase__ : Optional[Any] = 16
        lowerCamelCase__ : Any = 2
        lowerCamelCase__ : Optional[Any] = 0.02
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : Any = 128
        lowerCamelCase__ : List[Any] = 2
        lowerCamelCase__ : Optional[Any] = 9
        lowerCamelCase__ : Any = 1
        lowerCamelCase__ : Optional[int] = None
    # Builds random ids/masks/labels and a ConvBertConfig from the sizes above.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : str = None
        if self.use_input_mask:
            lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : int = None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : List[Any] = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Base model: checks the last_hidden_state shape for dict and list inputs.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Any ):
        lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ )
        lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase__ : List[str] = [input_ids, input_mask]
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Masked-LM head: logits over the full vocabulary per position.
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Sequence classification head: one logit vector per example.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
        lowerCamelCase__ : int = self.num_labels
        lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Multiple-choice head: inputs are tiled across the choice dimension.
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
        lowerCamelCase__ : Optional[int] = self.num_choices
        lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ )
        lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Tuple = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Token classification head: per-token label logits.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ):
        lowerCamelCase__ : List[Any] = self.num_labels
        lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Question answering head: start/end logits per position.
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
        lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ )
        lowerCamelCase__ : int = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Repackages prepare_config_and_inputs() output as (config, inputs_dict)
    # for the common tests.  NOTE(review): the annotated tuple-target unpack
    # below is a SyntaxError, and the final dict reads unbound names.
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : str = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : str = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common + pipeline test-suite wiring for the TFConvBert model family.

    NOTE(review): local names in this class were mechanically mangled — every
    assignment targets ``lowerCamelCase__`` while later statements read the
    intended names (``model``, ``outputs``, ``encoder_seq_length``, ...), and
    many call arguments became the undefined ``UpperCamelCase__``.  The code
    is kept byte-identical here; comments only record intent.
    """

    # All TFConvBert heads exercised by the common tests (empty without TF).
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task name -> model class map used by the pipeline test mixin.
    # NOTE(review): every class attribute is named ``a``, so each assignment
    # shadows the previous one and only the final ``a = False`` survives —
    # the original attribute names need restoring.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature switches consumed by the common tester (original names lost).
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: str ):
        # setUp: build the shared model tester and a ConfigTester.
        # NOTE(review): ``config_class=UpperCamelCase__`` is an undefined
        # name — presumably the ConvBert config class before mangling.
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: List[str] ):
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Forward-pass shape test for the base model.
        # NOTE(review): ``*UpperCamelCase__`` should unpack the tuple
        # assigned (to a mangled name) on the previous line; same pattern in
        # the five methods below.
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Saved-model round-trip: save with saved_model=True, reload the
        # exported SavedModel via tf.keras, then verify the hidden-state and
        # attention output shapes of the reloaded model.
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                # SavedModel exports live under <dir>/saved_model/1.
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                # One hidden state per layer plus the embeddings output.
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                # ConvBert halves the number of attention heads via its
                # head-ratio mechanism, hence num_attention_heads / 2.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def lowerCamelCase_ ( self: int ):
        # Smoke test: the public checkpoint loads successfully.
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        # Attention-output test: verifies shapes/counts of returned attentions
        # with output_attentions toggled both via inputs and via the config.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )

        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            # NOTE(review): the parameter was presumably ``outputs`` — the
            # body reads ``outputs`` and ``decoder_attentions``, which are
            # otherwise undefined after the renaming.
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            # NOTE(review): same renaming issue as above (``outputs``).
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    """Integration test comparing a TFConvBert checkpoint against golden values."""

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # NOTE(review): every local in this method had been collapsed onto a
        # single mangled name, leaving ``model``/``output`` undefined; the
        # intended names are restored from the call structure.
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # batch_size, sequence_length, hidden_size
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        # Golden slice of the hidden states for the first 3 tokens/dims.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return ``u * (u - 1) * ... * (u - (p - 1))``.

    This is the rising product used by Newton's forward-difference
    interpolation formula.  For ``p <= 1`` the product is just ``u``.

    NOTE(review): the mangled signature had two parameters with the same
    name (a SyntaxError) and never assigned ``temp``; both restored.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


# Backward-compatible alias for the mangled public name this module exposed.
SCREAMING_SNAKE_CASE_ = ucal
def main() -> None:
    """Interactively read sample points and evaluate Newton's
    forward-difference interpolation polynomial at a requested value.

    NOTE(review): local names and the function name itself had been mangled
    away (the ``__main__`` guard below still calls ``main()``, which grounds
    the restored name); the table-building assignments are reconstructed
    from the surviving expressions.
    """
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    # Pre-fill the n x n forward-difference table with zeros.
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    # First column holds the known function values y(x_i).
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    # Normalized offset from the first sample (assumes evenly spaced x).
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 718 |
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Dict ={
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =[
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)  # module logger (name mangled to ``_A``)
# Checkpoint name -> config URL map.
# NOTE(review): this assignment shadows the logger above because both were
# renamed to ``_A`` — presumably TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP originally.
_A : Dict ={
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for the TrOCR decoder.

    NOTE(review): the base class and the three class attributes had been
    mangled (base to ``_lowercase``, all three attributes to ``a`` so only
    the last assignment survived); they are restored to the standard
    `PretrainedConfig` layout, and the duplicate ``__init__`` parameter
    names (a SyntaxError) are restored from the attribute assignments.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute name -> TrOCR-specific config field.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """See `PretrainedConfig` for the inherited keyword arguments."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A : Dict =logging.get_logger(__name__)  # module logger (name mangled to ``_A``)
# Checkpoint name -> config URL map; shadows the logger above because both
# were renamed to ``_A`` (presumably the *_PRETRAINED_CONFIG_ARCHIVE_MAP).
_A : Dict ={
    '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class _lowercase ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration class for ConvNeXT V2.

    NOTE(review): base classes restored from the two imports at the top of
    the file; the duplicate ``__init__`` parameter names (a SyntaxError) are
    restored from the attribute assignments, and the pair assignment of
    ``_out_features``/``_out_indices`` is restored per the documented return
    of ``get_aligned_output_features_output_indices``.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults follow the "tiny" variant when not specified.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Align requested backbone outputs with the stage names (helper
        # returns the (out_features, out_indices) pair).
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True iff ``graph`` is bipartite.

    ``graph`` is an adjacency mapping over vertices ``0 .. len(graph) - 1``.
    Vertices are 2-colored component by component via DFS, then every edge is
    checked for joining two same-colored endpoints.

    NOTE(review): the inner ``dfs`` had duplicate parameter names (a
    SyntaxError) and had lost its ``visited[v]``/``color[v]`` assignments,
    which would recurse forever; both are restored.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color `v` with `c`, then visit unvisited neighbors with 1 - c.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Cover every connected component (the graph may be disconnected).
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Bipartite iff no edge connects two equally colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Backward-compatible alias for the mangled public name.
SCREAMING_SNAKE_CASE_ = check_bipartite_dfs
# Adjacency list of the sample graph (bipartite: sets {0, 2} and {1, 3}).
_A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# NOTE(review): the checker below was written against the names ``graph``
# and ``check_bipartite_dfs``, which the mangling had removed.
graph = _A
print(check_bipartite_dfs(graph))
| 631 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
    """Integration tests comparing XLM-RoBERTa checkpoints against golden values.

    NOTE(review): both test methods had been mangled to the same name
    (``lowerCamelCase_``), so the second definition silently replaced the
    first; canonical unittest names are restored so both are discovered,
    and the collapsed local names are restored from the call structure.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( Dataset ):
    """Map-style dataset that lazily applies ``process(item, **params)``.

    NOTE(review): the base class had been mangled to a self-referential
    (undefined) name; ``Dataset`` is restored from the torch.utils.data
    import at the top of the file.  The duplicate ``__init__`` parameter
    names (a SyntaxError) are restored from the attribute assignments.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class _lowercase ( IterableDataset ):
    """Iterator applying ``infer(item, **params)`` to every item of ``loader``,
    optionally unrolling model batches back into single items.

    NOTE(review): the iterator-protocol method names had been mangled away —
    the body still calls ``self.loader_batch_item()`` and relies on
    ``__next__`` — and the duplicate ``__init__`` parameter names were a
    SyntaxError.  Names restored; the base class is restored from the
    torch.utils.data import at the top of the file.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        """
        Args:
            loader: any iterable (typically a torch DataLoader).
            infer: callable applied to each item.
            params (dict): extra keyword arguments forwarded to ``infer``.
            loader_batch_size (int, optional): when set, ``infer`` results are
                assumed batched and are unrolled item by item.
        """
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping of the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item at ``self._loader_batch_index`` within the current batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Iterator for chunk pipelines: ``infer`` yields a sub-iterator per item,
    and this class flattens all sub-iterators into one stream.

    NOTE(review): the base name resolves to the immediately preceding
    pipeline-iterator class (all classes in this file were renamed to
    ``_lowercase``); ``__iter__``/``__next__`` are restored — the originals
    had been renamed away, breaking the iterator protocol, and ``__init__``
    had duplicate parameter names (a SyntaxError).
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # Chunk pipelines never use loader batching, so it is not forwarded.
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # No sub-iterator started yet: create one from the first item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk-pipeline items: it accumulates
    items (possibly while unbatching) until one carries ``is_last`` and then
    hands the whole group to the caller, so ``process`` and ``postprocess``
    see the same boundaries.

    NOTE(review): upstream this inherits the plain pipeline iterator; with
    every class renamed to ``_lowercase`` the base here resolves to the chunk
    iterator instead — TODO confirm the intended base when restoring names.
    ``__iter__``/``__next__`` names are restored (they had been mangled,
    breaking the iterator protocol).
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class _lowercase ( Dataset ):
    """Dataset view exposing a single field of each dict-like element.

    NOTE(review): base restored to the imported torch ``Dataset`` (the
    mangled base resolved to an unrelated class); the duplicate ``__init__``
    parameter names (a SyntaxError) are restored from ``__getitem__``.
    """

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class _lowercase ( Dataset ):
    """Dataset view pairing two fields of each element as ``{"text", "text_pair"}``.

    NOTE(review): both key parameters had been mangled to the same name
    (``keya`` — a SyntaxError that also made ``text`` and ``text_pair`` read
    the same field); two distinct keys are restored from the ``__getitem__``
    payload, and the base is restored to the imported torch ``Dataset``.
    """

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 631 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632 | """simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class snake_case ( property ):
    """Descriptor that mimics ``@property`` but caches the computed value.

    The value is stored on the instance under ``"__cached_" + fget.__name__``
    and returned from there on subsequent accesses; deleting that attribute
    invalidates the cache.

    NOTE(review): the base class had been mangled to the undefined name
    ``__UpperCAmelCase``; ``property`` is restored based on the use of
    ``self.fget``.  The duplicate ``__get__`` parameter names (a SyntaxError)
    and the destroyed ``getattr`` arguments are restored per the descriptor
    protocol.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            # Accessed on the class: return the descriptor itself.
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute')
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def a_ ( val ):
    """Convert a string representation of truth to ``1`` (true) or ``0`` (false).

    Mirrors ``distutils.util.strtobool``: true values are y/yes/t/true/on/1,
    false values are n/no/f/false/off/0 (case-insensitive).

    Raises:
        ValueError: if ``val`` is anything else.

    NOTE(review): the mangled version named the parameter differently from
    the body (``val`` was undefined) and dropped the lowercasing assignment,
    so mixed-case input like ``"YES"`` would have raised; both restored.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''')
def a_ ( lowerCamelCase ):
if is_torch_fx_proxy(lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase ):
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase ):
return _is_numpy(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
return isinstance(lowerCamelCase , torch.Tensor )
def a_ ( lowerCamelCase ):
    """Torch-tensor check that is safe to call when torch is not installed."""
    if not is_torch_available():
        return False
    return _is_torch(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """True when the argument is a torch.device instance."""
    import torch
    device_type = torch.device
    return isinstance(lowerCamelCase, device_type)
def a_ ( lowerCamelCase ):
    """torch.device check that degrades to False when torch is unavailable."""
    if not is_torch_available():
        return False
    return _is_torch_device(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """True when the argument is a ``torch.dtype``, or a string naming one
    (e.g. ``"float32"`` resolves to ``torch.float32``).

    Bug fix: the original compared/resolved the value against *itself*
    (``isinstance(x, x)``, ``hasattr(x, x)``, ``getattr(x, x)``) and never
    rebound the resolved dtype; strings are now looked up as attributes of
    the ``torch`` module, matching the upstream implementation.
    """
    import torch
    if isinstance(lowerCamelCase, str):
        if hasattr(torch, lowerCamelCase):
            lowerCamelCase = getattr(torch, lowerCamelCase)
        else:
            # Unknown dtype name.
            return False
    return isinstance(lowerCamelCase, torch.dtype)
def a_ ( lowerCamelCase ):
    """torch.dtype check that degrades to False when torch is unavailable."""
    if not is_torch_available():
        return False
    return _is_torch_dtype(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """True when the argument is a TensorFlow tensor (tf must be importable)."""
    import tensorflow as tf
    tensor_type = tf.Tensor
    return isinstance(lowerCamelCase, tensor_type)
def a_ ( lowerCamelCase ):
    """TF-tensor check that is safe to call when TensorFlow is not installed."""
    if not is_tf_available():
        return False
    return _is_tensorflow(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """True when the argument is a *symbolic* (graph-mode) TensorFlow tensor.

    Bug fix: ``hasattr`` was previously probed on the *input* instead of the
    ``tf`` module, so the TF >= 2.14 fast path could essentially never be
    taken; the probe is restored to the module, per upstream.
    """
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, 'is_symbolic_tensor'):
        return tf.is_symbolic_tensor(lowerCamelCase)
    # Older TF: eager tensors are EagerTensor subclasses, so an exact type
    # match on tf.Tensor identifies the symbolic ones.
    return type(lowerCamelCase) == tf.Tensor
def a_ ( lowerCamelCase ):
    """Symbolic-TF-tensor check that degrades to False without TensorFlow."""
    if not is_tf_available():
        return False
    return _is_tf_symbolic_tensor(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """True when the argument is a JAX ndarray (jax must be importable)."""
    import jax.numpy as jnp  # noqa: F811
    array_type = jnp.ndarray
    return isinstance(lowerCamelCase, array_type)
def a_ ( lowerCamelCase ):
    """JAX-tensor check that is safe to call when flax/jax is not installed."""
    if not is_flax_available():
        return False
    return _is_jax(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """Recursively convert tensors / arrays / containers to plain Python objects.

    Dicts map to dicts of converted values, sequences to lists, framework
    tensors and numpy values to (nested) lists of Python scalars; anything
    else is returned unchanged.

    Bug fix: the recursion previously re-processed the *whole* input instead
    of each value/element, and the tensor dispatch read the unbound name
    ``obj``; both now operate on the parameter / iteration variables.
    """
    if isinstance(lowerCamelCase , (dict, UserDict) ):
        return {k: a_(v ) for k, v in lowerCamelCase.items()}
    elif isinstance(lowerCamelCase , (list, tuple) ):
        return [a_(o ) for o in lowerCamelCase]
    elif is_tf_tensor(lowerCamelCase ):
        return lowerCamelCase.numpy().tolist()
    elif is_torch_tensor(lowerCamelCase ):
        return lowerCamelCase.detach().cpu().tolist()
    elif is_jax_tensor(lowerCamelCase ):
        return np.asarray(lowerCamelCase ).tolist()
    elif isinstance(lowerCamelCase , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return lowerCamelCase.tolist()
    else:
        return lowerCamelCase
def a_ ( lowerCamelCase ):
    """Convert the input (possibly a nested dict of tensors / sequences) to numpy.

    Bug fix: the dict branch previously recursed on the whole input and the
    tensor branches read the unbound name ``obj``; both now use the
    parameter / iteration variables, matching the upstream implementation.
    """
    if isinstance(lowerCamelCase , (dict, UserDict) ):
        return {k: a_(v ) for k, v in lowerCamelCase.items()}
    elif isinstance(lowerCamelCase , (list, tuple) ):
        return np.array(lowerCamelCase )
    elif is_tf_tensor(lowerCamelCase ):
        return lowerCamelCase.numpy()
    elif is_torch_tensor(lowerCamelCase ):
        return lowerCamelCase.detach().cpu().numpy()
    elif is_jax_tensor(lowerCamelCase ):
        return np.asarray(lowerCamelCase )
    else:
        return lowerCamelCase
class snake_case ( OrderedDict ):
    """Base class for model outputs: an ordered mapping that drops ``None``
    fields, supports attribute access, string-key access and tuple-style
    integer indexing, and forbids in-place mutation helpers.

    NOTE(review): the original text inherited from an unresolved placeholder
    name, declared duplicate ``*args``/``**kwargs`` parameter names (a
    SyntaxError) and read dozens of unbound locals; this reconstruction
    restores the standard ``transformers`` ModelOutput contract on top of
    ``OrderedDict`` (already imported at the top of this file).
    """

    def __post_init__( self ):
        """Populate the mapping from the dataclass fields after ``__init__``."""
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self ,class_fields[0].name )
        other_fields_are_none = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field ,dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element ,(list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] ,str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self ,element[0] ,element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self ,field.name )
                if v is not None:
                    self[field.name] = v

    def __delitem__( self ,*args ,**kwargs ):
        raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )

    def setdefault( self ,*args ,**kwargs ):
        raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )

    def pop( self ,*args ,**kwargs ):
        raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )

    def update( self ,*args ,**kwargs ):
        raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )

    def __getitem__( self ,k ):
        # String keys behave like dict access; integers index the tuple view.
        if isinstance(k ,str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self ,name ,value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name ,value )
        super().__setattr__(name ,value )

    def __setitem__( self ,key ,value ):
        # Will raise a KeyException if needed
        super().__setitem__(key ,value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key ,value )

    def to_tuple( self ):
        """Return the non-``None`` values as a plain tuple."""
        return tuple(self[k] for k in self.keys() )
class snake_case ( str , Enum ):
    """String-valued Enum that raises a clearer error for invalid values.

    NOTE(review): the original listed the same placeholder base class twice
    (``TypeError: duplicate base class`` at class creation) and misspelled
    ``_value2member_map_``; it is reconstructed here as the standard
    ``str`` + ``Enum`` combination with the ``_missing_`` hook restored so
    the Enum machinery actually invokes it.
    """

    @classmethod
    def _missing_( cls ,value ):
        # Called by Enum when lookup-by-value fails.
        raise ValueError(
            f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class snake_case ( __UpperCAmelCase ):
    """Padding strategies accepted by tokenizer ``padding=`` arguments:
    longest / max_length / do_not_pad."""
    # NOTE(review): obfuscation collapsed the three member names (originally
    # LONGEST / MAX_LENGTH / DO_NOT_PAD upstream) into a single identifier,
    # so only the last assignment survives -- confirm against upstream.
    snake_case__ = "longest"
    snake_case__ = "max_length"
    snake_case__ = "do_not_pad"
class snake_case ( __UpperCAmelCase ):
    """Framework identifiers accepted by ``return_tensors=`` arguments:
    pt / tf / np / jax."""
    # NOTE(review): obfuscation collapsed the four member names (originally
    # PYTORCH / TENSORFLOW / NUMPY / JAX upstream) into one identifier, so
    # only the last assignment survives -- confirm against upstream.
    snake_case__ = "pt"
    snake_case__ = "tf"
    snake_case__ = "np"
    snake_case__ = "jax"
class snake_case :
    """Enter a whole list of context managers with a single ``with`` statement,
    delegating nesting and unwinding to ``contextlib.ExitStack``.

    Bug fix: the original constructor never stored its arguments on ``self``
    (both attributes were assigned to a throwaway name, so ``__enter__``
    crashed with AttributeError), and ``__exit__`` declared duplicate
    ``*args``/``**kwargs`` parameter names (a SyntaxError).
    """

    def __init__( self ,context_managers: List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ):
        # Enter every wrapped manager; ExitStack guarantees LIFO unwinding.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self ,*args ,**kwargs ):
        self.stack.__exit__(*args ,**kwargs )
def a_ ( lowerCamelCase ):
    """Whether the model class can compute a loss internally: true when its
    framework-specific forward signature has a ``return_loss`` parameter
    defaulting to ``True``.

    Bug fix: the framework and signature were previously assigned to a
    throwaway name while later lines read the unbound names ``framework``,
    ``signature`` and ``model_class``; the bindings are restored.
    """
    framework = infer_framework(lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(lowerCamelCase.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def a_ ( lowerCamelCase ):
    """List the label argument names the model class expects (all params
    containing "label", plus start/end positions for QA models).

    Bug fix: the model name, framework and signature were previously assigned
    to a throwaway name while later lines read the unbound names
    ``model_name``, ``framework``, ``signature`` and ``model_class``; the
    bindings are restored.
    """
    model_name = lowerCamelCase.__name__
    framework = infer_framework(lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(lowerCamelCase.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a_ ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict, joining nested keys
    with ``delimiter`` (e.g. ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``).

    Bug fix: the original declared three parameters all named
    ``lowerCamelCase`` (a SyntaxError), read unbound names inside the
    generator, and recursed through an undefined function; parameters and
    recursion are restored per upstream.
    """
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                # Recurse through the public entry point so nested mappings
                # are themselves flattened before being merged in.
                yield from a_(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def a_ ( working_dir , use_temp_dir = False ):
    """Yield either the given working directory or a fresh temporary one.

    When ``use_temp_dir`` is true, a ``tempfile.TemporaryDirectory`` is
    created for the duration of the ``with`` block and cleaned up afterwards;
    otherwise ``working_dir`` is yielded unchanged.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and then read the unbound names ``use_temp_dir`` /
    ``working_dir``; proper parameter names are restored.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def a_ ( array , axes = None ):
    """Framework-agnostic transpose for numpy / torch / TF / JAX tensors.

    With ``axes=None`` the full transpose is taken; otherwise the given axis
    permutation is applied.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and referenced the unbound names ``array``/``axes``.
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )
def a_ ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / TF / JAX tensors.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and referenced the unbound name ``array``.
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )
def a_ ( array , axis = None ):
    """Framework-agnostic squeeze for numpy / torch / TF / JAX tensors.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and referenced the unbound names ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )
def a_ ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / TF / JAX tensors.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and referenced the unbound names ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )
def a_ ( lowerCamelCase ):
    """Return the number of elements of a numpy / torch / TF / JAX tensor.

    Bug fixes: the torch and JAX branches read the unbound name ``array``
    instead of the parameter, and the error message was copy-pasted from
    ``expand_dims`` (now says ``tensor_size``, matching upstream).
    """
    if is_numpy_array(lowerCamelCase ):
        return np.size(lowerCamelCase )
    elif is_torch_tensor(lowerCamelCase ):
        return lowerCamelCase.numel()
    elif is_tf_tensor(lowerCamelCase ):
        import tensorflow as tf
        return tf.size(lowerCamelCase )
    elif is_jax_tensor(lowerCamelCase ):
        return lowerCamelCase.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(lowerCamelCase )}.''' )
def a_ ( auto_map , repo_id ):
    """Prefix every bare entry of an ``auto_map`` with ``<repo_id>--`` so the
    mapping is fully qualified; entries that are ``None`` or already contain
    ``--`` are left untouched.  The mapping is updated in place and returned.

    Bug fix: the original declared two parameters with the same name (a
    SyntaxError) and discarded the computed values instead of writing them
    back into the mapping.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def a_ ( lowerCamelCase ):
    """Infer the DL framework ("tf" / "pt" / "flax") of a model class by
    scanning its MRO for framework module prefixes or base-class names.

    Bug fixes: the module/name of each base were assigned to a throwaway name
    while the conditions read the unbound ``module``/``name``, and the final
    ``raise`` is restored as a ``for``/``else`` (raising only after the whole
    MRO failed to match, per upstream -- an in-loop else would raise on the
    very first base class).
    """
    for base_class in inspect.getmro(lowerCamelCase ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {lowerCamelCase}.''' )
| 632 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Fast unit tests for ``KandinskyVaaPriorPipeline``, built from tiny,
    seed-fixed CLIP text/vision encoders, a tiny ``PriorTransformer`` and an
    ``UnCLIPScheduler``.

    NOTE(review): this chunk is obfuscation-damaged -- most assignment
    targets were rewritten to the placeholder ``UpperCAmelCase__`` while the
    following lines still read the original local names (``tokenizer``,
    ``model``, ``pipe``, ``image`` ...), and the inputs helper declares two
    parameters with the same name (a SyntaxError in Python).  Behaviour can
    only be restored from the upstream diffusers test file, so this edit adds
    documentation without touching code.
    """
    # Pipeline class under test plus the parameter lists used by the shared
    # PipelineTesterMixin machinery (member names were collapsed by
    # obfuscation; originally pipeline_class / params / batch_params / ...).
    snake_case__ = KandinskyVaaPriorPipeline
    snake_case__ = ["prompt"]
    snake_case__ = ["prompt", "negative_prompt"]
    snake_case__ = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    snake_case__ = False
    @property
    def __lowerCAmelCase ( self : List[str] ):
        # Hidden size used for the tiny text embedder.
        return 32
    @property
    def __lowerCAmelCase ( self : str ):
        # Time-embedding input dim for the tiny prior.
        return 32
    @property
    def __lowerCAmelCase ( self : List[Any] ):
        return self.time_input_dim
    @property
    def __lowerCAmelCase ( self : Dict ):
        return self.time_input_dim * 4
    @property
    def __lowerCAmelCase ( self : List[Any] ):
        return 100
    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        # Tiny CLIP tokenizer fixture from the hub.
        UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def __lowerCAmelCase ( self : Optional[int] ):
        # Tiny, deterministic text encoder.
        torch.manual_seed(0 )
        UpperCAmelCase__ = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
        return CLIPTextModelWithProjection(lowerCamelCase__ )
    @property
    def __lowerCAmelCase ( self : int ):
        # Tiny PriorTransformer; clip_std is forced to ones so
        # post_process_latents does not zero everything out.
        torch.manual_seed(0 )
        UpperCAmelCase__ = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        UpperCAmelCase__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    @property
    def __lowerCAmelCase ( self : Optional[int] ):
        # Tiny, deterministic image encoder.
        torch.manual_seed(0 )
        UpperCAmelCase__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,image_size=224 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,)
        UpperCAmelCase__ = CLIPVisionModelWithProjection(lowerCamelCase__ )
        return model
    @property
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Standard CLIP preprocessing for the image encoder above.
        UpperCAmelCase__ = CLIPImageProcessor(
            crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
        return image_processor
    def __lowerCAmelCase ( self : Tuple ):
        # Assemble the full component dict consumed by the pipeline ctor.
        UpperCAmelCase__ = self.dummy_prior
        UpperCAmelCase__ = self.dummy_image_encoder
        UpperCAmelCase__ = self.dummy_text_encoder
        UpperCAmelCase__ = self.dummy_tokenizer
        UpperCAmelCase__ = self.dummy_image_processor
        UpperCAmelCase__ = UnCLIPScheduler(
            variance_type='fixed_small_log' ,prediction_type='sample' ,num_train_timesteps=1_000 ,clip_sample=lowerCamelCase__ ,clip_sample_range=1_0.0 ,)
        UpperCAmelCase__ = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any=0 ):
        # NOTE(review): two parameters share one name here (SyntaxError);
        # upstream this is get_dummy_inputs(self, device, seed=0).
        if str(lowerCamelCase__ ).startswith('mps' ):
            UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCAmelCase__ = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def __lowerCAmelCase ( self : List[Any] ):
        # Smoke test: 2 inference steps on CPU, compare a slice of the prior
        # embedding against a pinned reference and check dict/tuple parity.
        UpperCAmelCase__ = 'cpu'
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
        UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        UpperCAmelCase__ = output.image_embeds
        UpperCAmelCase__ = pipe(
            **self.get_dummy_inputs(lowerCamelCase__ ) ,return_dict=lowerCamelCase__ ,)[0]
        UpperCAmelCase__ = image[0, -10:]
        UpperCAmelCase__ = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        UpperCAmelCase__ = np.array(
            [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def __lowerCAmelCase ( self : str ):
        # Batch-of-one consistency check from the shared tester mixin.
        UpperCAmelCase__ = torch_device == 'cpu'
        UpperCAmelCase__ = True
        UpperCAmelCase__ = False
        self._test_inference_batch_single_identical(
            test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,test_mean_pixel_difference=lowerCamelCase__ ,)
    @skip_mps
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Attention-slicing equivalence check from the shared tester mixin.
        UpperCAmelCase__ = torch_device == 'cpu'
        UpperCAmelCase__ = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=lowerCamelCase__ ,test_mean_pixel_difference=lowerCamelCase__ ,)
| 632 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ : Union[str, Any] = False
class snake_case ( unittest.TestCase ):
    """Placeholder for fast (non-slow) VersatileDiffusion tests; the real
    coverage lives in the nightly GPU suite below."""
    pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Nightly GPU integration tests for ``VersatileDiffusionPipeline``:
    save/reload round-trip plus dual-guided, text-to-image and
    image-variation generation against pinned reference slices.

    NOTE(review): this chunk is obfuscation-damaged -- assignment targets
    were rewritten to the placeholder ``UpperCAmelCase__`` while later lines
    still read the original names (``pipe``, ``image``, ``generator`` ...),
    so the methods cannot run as written.  Behaviour can only be restored
    from the upstream diffusers test file; this edit adds documentation
    without touching code.
    """
    def __lowerCAmelCase ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCAmelCase ( self : Any ):
        # Round-trip: run dual-guided, save_pretrained to a temp dir, reload,
        # and require bitwise-near-identical output from the reloaded pipe.
        UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase__ )
            UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = generator.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def __lowerCAmelCase ( self : Optional[int] ):
        # Full sweep of the three pipeline modes, each compared against a
        # pinned 3x3 reference slice at a loose 1e-1 tolerance (fp16 GPU).
        UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = 'cyberpunk 2077'
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        UpperCAmelCase__ = 'A painting of a squirrel eating a burger '
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.text_to_image(
            prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        UpperCAmelCase__ = pipe.image_variation(lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='numpy' ).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 632 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ : Any = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ : List[str] = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class snake_case ( __UpperCAmelCase ):
    """Fast (tokenizers-backed) BART tokenizer: GPT-2 byte-level BPE with
    RoBERTa-style special-token handling and configurable
    ``add_prefix_space`` / ``trim_offsets`` post-processing.

    NOTE(review): this chunk is obfuscation-damaged -- ``__init__`` declares
    many parameters that all share the name ``lowerCamelCase__`` (a
    SyntaxError), assignment targets were rewritten to ``UpperCAmelCase__``
    while later lines read the original names, and the ``@mask_token.setter``
    decorator references a property whose name was destroyed.  Behaviour can
    only be restored from the upstream transformers file; this edit adds
    documentation without touching code.
    """
    # Class-level tokenizer metadata (names collapsed by obfuscation;
    # originally vocab_files_names / pretrained maps / max sizes /
    # model_input_names / slow_tokenizer_class).
    snake_case__ = VOCAB_FILES_NAMES
    snake_case__ = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ = ["input_ids", "attention_mask"]
    snake_case__ = BartTokenizer
    def __init__( self : Any ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : Dict="replace" ,lowerCamelCase__ : Tuple="<s>" ,lowerCamelCase__ : List[str]="</s>" ,lowerCamelCase__ : Union[str, Any]="</s>" ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : Optional[Any]="<unk>" ,lowerCamelCase__ : str="<pad>" ,lowerCamelCase__ : List[str]="<mask>" ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : List[Any]=True ,**lowerCamelCase__ : List[str] ,):
        super().__init__(
            lowerCamelCase__ ,lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,errors=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,add_prefix_space=lowerCamelCase__ ,trim_offsets=lowerCamelCase__ ,**lowerCamelCase__ ,)
        # Rebuild the backend pre-tokenizer when add_prefix_space differs
        # from the serialized state.
        UpperCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' ,lowerCamelCase__ ) != add_prefix_space:
            UpperCAmelCase__ = getattr(lowerCamelCase__ ,pre_tok_state.pop('type' ) )
            UpperCAmelCase__ = add_prefix_space
            UpperCAmelCase__ = pre_tok_class(**lowerCamelCase__ )
        UpperCAmelCase__ = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase__ = 'post_processor'
        UpperCAmelCase__ = getattr(self.backend_tokenizer ,lowerCamelCase__ ,lowerCamelCase__ )
        if tokenizer_component_instance:
            UpperCAmelCase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase__ = tuple(state['sep'] )
            if "cls" in state:
                UpperCAmelCase__ = tuple(state['cls'] )
            UpperCAmelCase__ = False
            if state.get('add_prefix_space' ,lowerCamelCase__ ) != add_prefix_space:
                UpperCAmelCase__ = add_prefix_space
                UpperCAmelCase__ = True
            if state.get('trim_offsets' ,lowerCamelCase__ ) != trim_offsets:
                UpperCAmelCase__ = trim_offsets
                UpperCAmelCase__ = True
            if changes_to_apply:
                UpperCAmelCase__ = getattr(lowerCamelCase__ ,state.pop('type' ) )
                UpperCAmelCase__ = component_class(**lowerCamelCase__ )
                setattr(self.backend_tokenizer ,lowerCamelCase__ ,lowerCamelCase__ )
    @property
    def __lowerCAmelCase ( self : str ):
        # mask_token getter: logs an error and returns None when unset.
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Any ):
        # mask_token setter: lstrip so "<mask>" eats the preceding space.
        UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else value
        UpperCAmelCase__ = value
    def __lowerCAmelCase ( self : Optional[Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Tuple ):
        # _batch_encode_plus guard: pretokenized input needs add_prefix_space.
        UpperCAmelCase__ = kwargs.get('is_split_into_words' ,lowerCamelCase__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*lowerCamelCase__ ,**lowerCamelCase__ )
    def __lowerCAmelCase ( self : Any ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : List[Any] ):
        # _encode_plus guard: same add_prefix_space requirement as above.
        UpperCAmelCase__ = kwargs.get('is_split_into_words' ,lowerCamelCase__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*lowerCamelCase__ ,**lowerCamelCase__ )
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
        # save_vocabulary: delegate to the backend model's save.
        UpperCAmelCase__ = self._tokenizer.model.save(lowerCamelCase__ ,name=lowerCamelCase__ )
        return tuple(lowerCamelCase__ )
    def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any]=None ):
        # build_inputs_with_special_tokens: <s> A </s> (+ </s> B </s>).
        UpperCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: BART uses all-zero type ids.
        UpperCAmelCase__ = [self.sep_token_id]
        UpperCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 632 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : int = 50_257 ,lowerCamelCase__ : int = 1_024 ,lowerCamelCase__ : int = 768 ,lowerCamelCase__ : int = 12 ,lowerCamelCase__ : int = 12 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : str = "gelu_new" ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : float = 0.0_2 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,):
        # Builds a GPT-2 LM head decoder preceded by optional linear
        # encode/decode projections that map a CLIP prefix embedding into the
        # GPT-2 embedding space (CLIP-prefix captioning decoder).
        # NOTE(review): obfuscation damage -- all parameters share the name
        # ``lowerCamelCase__`` (a SyntaxError) and the ``UpperCAmelCase__ =``
        # targets below were originally ``self.prefix_length`` /
        # ``self.encode_prefix`` / ``self.decode_prefix`` / ``self.transformer``
        # etc.; confirm against the upstream diffusers source.
        super().__init__()
        UpperCAmelCase__ = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                f''' `n_embd`: {n_embd} are not equal.''' )
        UpperCAmelCase__ = prefix_inner_dim
        UpperCAmelCase__ = prefix_hidden_dim
        # Identity when no hidden projection dim is configured.
        UpperCAmelCase__ = (
            nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        UpperCAmelCase__ = (
            nn.Linear(self.prefix_hidden_dim ,lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        UpperCAmelCase__ = GPTaConfig(
            vocab_size=lowerCamelCase__ ,n_positions=lowerCamelCase__ ,n_embd=lowerCamelCase__ ,n_layer=lowerCamelCase__ ,n_head=lowerCamelCase__ ,n_inner=lowerCamelCase__ ,activation_function=lowerCamelCase__ ,resid_pdrop=lowerCamelCase__ ,embd_pdrop=lowerCamelCase__ ,attn_pdrop=lowerCamelCase__ ,layer_norm_epsilon=lowerCamelCase__ ,initializer_range=lowerCamelCase__ ,scale_attn_weights=lowerCamelCase__ ,use_cache=lowerCamelCase__ ,scale_attn_by_inverse_layer_idx=lowerCamelCase__ ,reorder_and_upcast_attn=lowerCamelCase__ ,)
        UpperCAmelCase__ = GPTaLMHeadModel(lowerCamelCase__ )
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : torch.Tensor ,lowerCamelCase__ : torch.Tensor ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[torch.Tensor] = None ,):
        # Forward pass: embed the token ids, project the CLIP prefix through
        # encode/decode, prepend it to the token embeddings and run GPT-2.
        # When labels are given, a dummy (zero) token block is prepended to
        # align the label sequence with the prefix positions.
        # NOTE(review): obfuscation damage -- the ``UpperCAmelCase__ =``
        # targets were originally ``embedding_text`` / ``hidden`` /
        # ``prefix_embeds`` / ``embedding_cat`` / ``dummy_token`` / ``out``;
        # confirm against the upstream diffusers source.
        UpperCAmelCase__ = self.transformer.transformer.wte(lowerCamelCase__ )
        UpperCAmelCase__ = self.encode_prefix(lowerCamelCase__ )
        UpperCAmelCase__ = self.decode_prefix(lowerCamelCase__ )
        UpperCAmelCase__ = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
        if labels is not None:
            UpperCAmelCase__ = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
            UpperCAmelCase__ = torch.cat((dummy_token, input_ids) ,dim=1 )
        UpperCAmelCase__ = self.transformer(inputs_embeds=lowerCamelCase__ ,labels=lowerCamelCase__ ,attention_mask=lowerCamelCase__ )
        # With a hidden projection configured, also return the intermediate
        # encoded prefix alongside the LM output.
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
    """Return an all-zero token-id tensor of shape ``(batch_size, prefix_length)``.

    Used as placeholder labels for the prefix positions during training
    (see the ``labels is not None`` branch of ``forward``, which calls
    ``self.get_dummy_token`` — hence the restored method name).

    Fixes: the original line used ``torch.intaa``, which is not a torch
    dtype (mangled ``torch.int64``), and repeated one parameter name,
    which is invalid Python.
    """
    return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[str] ):
    """Project a raw prefix through the learned ``encode_prefix`` module."""
    encoded_prefix = self.encode_prefix(lowerCamelCase__ )
    return encoded_prefix
@torch.no_grad()
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ):
    # Generate one caption per input feature: split the batch into single
    # features, map each back to the CLIP feature space via decode_prefix,
    # then beam-search a token sequence for it.
    # NOTE(review): identifiers look mangled — later lines read `features`,
    # `output_tokens`, `seq_lengths`, `generated_tokens`,
    # `generated_seq_lengths`, and the signature repeats one parameter name
    # (invalid Python). Confirm against the original file.
    UpperCAmelCase__ = torch.split(lowerCamelCase__ ,1 ,dim=0 )
    UpperCAmelCase__ = []
    UpperCAmelCase__ = []
    for feature in features:
        UpperCAmelCase__ = self.decode_prefix(feature.to(lowerCamelCase__ ) )  # back to the clip feature
        # Only support beam search for now
        UpperCAmelCase__ , UpperCAmelCase__ = self.generate_beam(
            input_embeds=lowerCamelCase__ ,device=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ )
        generated_tokens.append(output_tokens[0] )
        generated_seq_lengths.append(seq_lengths[0] )
    # Stack per-feature results back into batch tensors.
    UpperCAmelCase__ = torch.stack(lowerCamelCase__ )
    UpperCAmelCase__ = torch.stack(lowerCamelCase__ )
    return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str=None ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 67 ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : Optional[int] = None ,):
    # Beam search over the GPT-2 decoder: keeps `beam_size` candidate token
    # sequences, their cumulative log-probability `scores`, per-beam lengths
    # `seq_lengths` and a stop mask `is_stopped`; returns sequences sorted by
    # length-normalized score.
    # NOTE(review): identifiers look mangled — assignments all bind the same
    # name while later lines read `scores`, `tokens`, `seq_lengths`,
    # `is_stopped`, `generated`, `logits`, `next_tokens`, etc., and the
    # signature repeats one parameter name (invalid Python). Confirm against
    # the original file before relying on this block.
    UpperCAmelCase__ = eos_token_id
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = torch.ones(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.int )
    UpperCAmelCase__ = torch.zeros(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.bool )
    if input_embeds is not None:
        UpperCAmelCase__ = input_embeds
    else:
        UpperCAmelCase__ = self.transformer.transformer.wte(lowerCamelCase__ )
    for i in range(lowerCamelCase__ ):
        UpperCAmelCase__ = self.transformer(inputs_embeds=lowerCamelCase__ )
        UpperCAmelCase__ = outputs.logits
        # Temperature-scaled log-probabilities of the next token.
        UpperCAmelCase__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
        UpperCAmelCase__ = logits.softmax(-1 ).log()
        if scores is None:
            # First step: seed the beams with the top-k next tokens.
            UpperCAmelCase__ , UpperCAmelCase__ = logits.topk(lowerCamelCase__ ,-1 )
            UpperCAmelCase__ = generated.expand(lowerCamelCase__ ,*generated.shape[1:] )
            UpperCAmelCase__ , UpperCAmelCase__ = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
            if tokens is None:
                UpperCAmelCase__ = next_tokens
            else:
                UpperCAmelCase__ = tokens.expand(lowerCamelCase__ ,*tokens.shape[1:] )
                UpperCAmelCase__ = torch.cat((tokens, next_tokens) ,dim=1 )
        else:
            # Stopped beams are pinned: -inf everywhere except token 0.
            UpperCAmelCase__ = -float(np.inf )
            UpperCAmelCase__ = 0
            UpperCAmelCase__ = scores[:, None] + logits
            seq_lengths[~is_stopped] += 1
            # Rank all (beam, token) continuations by average log-prob.
            UpperCAmelCase__ = scores_sum / seq_lengths[:, None]
            UpperCAmelCase__ , UpperCAmelCase__ = scores_sum_average.view(-1 ).topk(lowerCamelCase__ ,-1 )
            UpperCAmelCase__ = next_tokens // scores_sum.shape[1]
            UpperCAmelCase__ = seq_lengths[next_tokens_source]
            UpperCAmelCase__ = next_tokens % scores_sum.shape[1]
            UpperCAmelCase__ = next_tokens.unsqueeze(1 )
            # Re-gather beam state for the selected continuations.
            UpperCAmelCase__ = tokens[next_tokens_source]
            UpperCAmelCase__ = torch.cat((tokens, next_tokens) ,dim=1 )
            UpperCAmelCase__ = generated[next_tokens_source]
            UpperCAmelCase__ = scores_sum_average * seq_lengths
            UpperCAmelCase__ = is_stopped[next_tokens_source]
        # Append the chosen token's embedding and update the stop mask.
        UpperCAmelCase__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
        UpperCAmelCase__ = torch.cat((generated, next_token_embed) ,dim=1 )
        UpperCAmelCase__ = is_stopped + next_tokens.eq(lowerCamelCase__ ).squeeze()
        if is_stopped.all():
            break
    # Order beams by length-normalized score, best first.
    UpperCAmelCase__ = scores / seq_lengths
    UpperCAmelCase__ = scores.argsort(descending=lowerCamelCase__ )
    # tokens tensors are already padded to max_seq_length
    UpperCAmelCase__ = [tokens[i] for i in order]
    UpperCAmelCase__ = torch.stack(lowerCamelCase__ ,dim=0 )
    UpperCAmelCase__ = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
    return output_texts, seq_lengths
| 632 | 1 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer–Moore substring search using the bad-character heuristic.

    Fixes applied: the original block's identifiers were mangled — the class
    was named ``snake_case`` although the module-level driver instantiates
    ``BoyerMooreSearch``; all four methods shared one mangled name
    (``__lowerCAmelCase``, each shadowing the previous) although the body
    calls ``self.mismatch_in_text`` / ``self.match_in_pattern`` and the
    driver calls ``bad_character_heuristic``; and ``__init__`` never bound
    ``self.text`` / ``self.pattern`` / ``self.textLen`` / ``self.patLen``
    even though every method reads them.
    """

    def __init__(self, text: str, pattern: str):
        # Store haystack/needle and cache their lengths.
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern
        is aligned at ``current_pos``, or -1 if the window matches fully."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
# Demo driver: search for the pattern in the text and print match positions.
# NOTE(review): the left-hand names appear mangled — the statements below
# read `text`, `pattern`, `bms` and `positions`, which are never assigned
# under those names here. Confirm against the original file.
lowerCAmelCase__ : List[Any] = 'ABAABA'
lowerCAmelCase__ : int = 'AB'
lowerCAmelCase__ : Any = BoyerMooreSearch(text, pattern)
lowerCAmelCase__ : Optional[int] = bms.bad_character_heuristic()
if len(positions) == 0:
    print('No match found')
else:
    print('Pattern found in following positions: ')
    print(positions)
| 632 | """simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# Checkpoint subdirectory name; referenced as {MODEL} in the argparse default
# at the bottom of this script (the original left-hand name was mangled).
MODEL = 'base_with_context'
def a_ ( lowerCamelCase , lowerCamelCase ):
    # Copy T5X note-encoder weights into the PyTorch SpectrogramNotesEncoder:
    # token/position embeddings, then per-layer attention (q/k/v/out), layer
    # norms and gated MLP, then the final encoder norm.
    # NOTE(review): assignment targets appear mangled — every statement binds
    # one shared name instead of a `model.*` attribute, the body reads
    # `weights`/`model` which are never bound, and the signature repeats a
    # parameter name (invalid Python). Confirm against the original script.
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = ly_weight['attention']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def a_ ( lowerCamelCase , lowerCamelCase ):
    # Copy T5X continuous-context-encoder weights into the PyTorch
    # SpectrogramContEncoder: input projection + position embedding, then
    # per-layer attention, norms and gated MLP, then the final encoder norm.
    # NOTE(review): same mangling as the loader above — assignment targets
    # (`model.*` attributes) were lost, `weights`/`model` are read but never
    # bound, and the signature repeats a parameter name (invalid Python).
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
        UpperCAmelCase__ = ly_weight['attention']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def a_ ( lowerCamelCase , lowerCamelCase ):
    # Copy T5X decoder weights into the PyTorch TaFilmDecoder: time-embedding
    # dense layers, position embedding, continuous-input projection, then per
    # layer self-attention, cross-attention, FiLM layers, norms and gated MLP,
    # then the final decoder norm and spectrogram output projection.
    # NOTE(review): same mangling as the encoder loaders — `model.*`
    # assignment targets were lost, `weights`/`model` are read but never
    # bound, and the signature repeats a parameter name (invalid Python).
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        UpperCAmelCase__ = ly_weight['self_attention']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = ly_weight['MultiHeadDotProductAttention_0']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
    return model
def a_ ( lowerCamelCase ):
    # End-to-end conversion: load the T5X checkpoint, parse the training gin
    # config, build the three PyTorch sub-models, copy weights across with the
    # loaders above, fetch the MelGAN decoder and assemble a
    # SpectrogramDiffusionPipeline (optionally saving it).
    # NOTE(review): left-hand names appear mangled — later lines read
    # `ta_checkpoint`, `gin_overrides`, `gin_file`, `gin_config`,
    # `synth_model`, `scheduler`, `notes_encoder`, `continuous_encoder`,
    # `decoder`, `melgan` and `pipe`, none of which are bound here. The file
    # tail calls `main(args)` although this function is named `a_`. Confirm
    # against the original conversion script.
    UpperCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    UpperCAmelCase__ = jnp.tree_util.tree_map(onp.array , lowerCamelCase )
    UpperCAmelCase__ = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    UpperCAmelCase__ = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    UpperCAmelCase__ = inference.parse_training_gin_file(lowerCamelCase , lowerCamelCase )
    UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path , lowerCamelCase )
    UpperCAmelCase__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    UpperCAmelCase__ = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    UpperCAmelCase__ = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    UpperCAmelCase__ = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    UpperCAmelCase__ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowerCamelCase )
    UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowerCamelCase )
    UpperCAmelCase__ = load_decoder(ta_checkpoint['target']['decoder'] , lowerCamelCase )
    UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    UpperCAmelCase__ = SpectrogramDiffusionPipeline(
        notes_encoder=lowerCamelCase , continuous_encoder=lowerCamelCase , decoder=lowerCamelCase , scheduler=lowerCamelCase , melgan=lowerCamelCase , )
    if args.save:
        pipe.save_pretrained(args.output_path )
# Script entry point: parse CLI arguments and run the conversion.
# NOTE(review): the left-hand name for the parser appears mangled — the
# following lines call `parser.add_argument`, and the last line calls
# `main(args)` although the conversion function above is named `a_`.
# `MODEL` in the checkpoint default is defined (mangled) near the imports.
if __name__ == "__main__":
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=F"""{MODEL}/checkpoint_500000""",
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    lowerCAmelCase__ : List[str] = parser.parse_args()
    main(args)
| 632 | 1 |
"""simple docstring"""
from __future__ import annotations
# Elementary charge in coulombs; the solver below reads this as
# ELECTRON_CHARGE (the original left-hand name was mangled).
ELECTRON_CHARGE = 1.6021E-19  # units = C
def a_ ( conductivity, electron_conc, mobility, ):
    """Solve the conductivity relation sigma = q * n * mu for the one unknown.

    Exactly one of the three arguments must be 0 (the unknown); the other two
    must be non-negative. Returns a ``(name, value)`` tuple naming the solved
    quantity.

    Fixes: the original signature repeated one mangled parameter name three
    times (invalid Python); the body already reads ``conductivity``,
    ``electron_conc`` and ``mobility``, so those names are restored.

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError('You cannot supply more or less than 2 values' )
    elif conductivity < 0:
        raise ValueError('Conductivity cannot be negative' )
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative' )
    elif mobility < 0:
        raise ValueError('mobility cannot be negative' )
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 632 | """simple docstring"""
import socket
def main():
    """Receive a file over TCP from a server on this host, port 12312, and
    write it to ``Received_file``.

    Fixes: the original block's local names were mangled (every assignment
    bound one shared name while later lines read ``sock``, ``out_file`` and
    ``data``, a NameError at runtime), and the function itself was renamed
    away from ``main`` although the module guard below calls ``main()``.
    """
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )  # TCP client socket
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(b'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            # Read until the server closes the connection (empty recv).
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
# Run the client only when executed as a script.
# NOTE(review): `main` is not defined under that name in this (mangled)
# file — the receiver function above was renamed; confirm against original.
if __name__ == "__main__":
    main()
| 632 | 1 |
"""simple docstring"""
# Digit parity sets read by reversible_numbers() below as EVEN_DIGITS /
# ODD_DIGITS (the original left-hand names were mangled).
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length, remainder, digits, length):
    """Count reversible numbers of ``length`` digits (Project Euler 145 helper).

    Recursively fills digit pairs from the outside in; ``digits`` is the
    working digit buffer (mutated in place), ``remainder`` the running carry,
    ``remaining_length`` how many positions are still unassigned.

    Fixes: identifiers were mangled — the function was renamed away from
    ``reversible_numbers`` although its own body (and ``solution`` below)
    calls it by that name, the signature repeated one parameter name
    (invalid Python), and the ``digits[...] = digit`` targets were lost.
    """
    if remaining_length == 0:
        # Fully assigned: verify every digit-pair sum is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 1_0
        return 1
    if remaining_length == 1:
        # Odd length: the middle digit contributes twice to its pair sum.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(1_0):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 1_0, digits, length)
        return result
    result = 0
    for digit1 in range(1_0):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # Pick the second digit's parity so the pair sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 1_0, digits, length, )
    return result
def solution(max_power=9):
    """Count reversible numbers below 10**max_power (Project Euler 145).

    Fixes: restored the name ``solution`` (called by the module guard below)
    and the mangled ``result`` accumulator / ``max_power`` parameter (the
    loop already read ``max_power``).
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
# Print the Project Euler 145 answer when run as a script.
# NOTE(review): `solution` is not defined under that name in this (mangled)
# file — the function above was renamed; confirm against the original.
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 632 | """simple docstring"""
from __future__ import annotations
class Matrix:
    """A 2-D matrix of ints/floats supporting arithmetic, determinants,
    inverses, powers and row/column insertion.

    Fixes applied: the original block's identifiers were mangled — the class
    was named ``snake_case`` although every constructor call inside the
    methods (and ``Matrix.dot_product``) uses ``Matrix``, and local names
    (``error``, ``cols``, ``values``, ``type_error``, ``result``) plus the
    ``self.rows`` bindings were collapsed to one shared name, which would
    raise NameError at runtime.
    """

    def __init__(self, rows: "list[list[int]]"):
        # Reject ragged rows, empty rows, and non-numeric values.
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        # Transpose view: one inner list per column.
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        # (rows, columns) tuple.
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        # Non-square matrices get determinant 0 by convention here.
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns) )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row: int, column: int):
        """Determinant of the submatrix with ``row``/``column`` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ] )

    def cofactors(self):
        # Minors with the checkerboard sign pattern applied.
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ] )

    def adjugate(self):
        # Transpose of the cofactor matrix.
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )

    def add_row(self, row: "list[int]", position: "int | None" = None):
        """Append ``row``, or insert it at ``position`` when given."""
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: "list[int]", position: "int | None" = None):
        """Append ``column``, or insert it at ``position`` when given."""
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ] )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ] )

    def __mul__(self, other):
        # Scalar multiplication truncates to int (matches the original int()).
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows] )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )

    def __pow__(self, other: int):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: "list[int]", column: "list[int]"):
        return sum(row[i] * column[i] for i in range(len(row)))
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 632 | 1 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
# Module-level logger via transformers' logging wrapper.
# NOTE(review): the left-hand name appears mangled (conventionally `logger`).
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
# Model-type -> Flax class-name tables for the auto classes. The original
# left-hand names were mangled away; they are restored here to the exact
# names the _LazyAutoMapping calls below already reference.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)
# Lazy config-class -> model-class mappings built from the name tables above.
# The original left-hand names were mangled; they are restored to the names
# the auto-model classes below already reference (e.g. FLAX_MODEL_MAPPING).
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto-model façade classes: each resolves a config type to a concrete Flax
# model class through the mapping stored on `snake_case__` (the
# `_model_mapping` attribute in the original source).
# NOTE(review): the `auto_class_update(...)` calls reference the original
# (pre-obfuscation) class names (FlaxAutoModel, FlaxAutoModelForPreTraining,
# ...) and the FLAX_MODEL_*_MAPPING constants — neither is bound in this
# copy; confirm against upstream transformers `modeling_flax_auto.py`.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_MAPPING


lowerCAmelCase__ : Optional[int] = auto_class_update(FlaxAutoModel)


# Pretraining head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING


lowerCAmelCase__ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


# Causal language-modeling head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


lowerCAmelCase__ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


# Masked language-modeling head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING


lowerCAmelCase__ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


# Sequence-to-sequence language-modeling head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


lowerCAmelCase__ : Dict = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


# Sequence-classification head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


lowerCAmelCase__ : int = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


# Question-answering head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


lowerCAmelCase__ : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


# Token-classification head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


lowerCAmelCase__ : Optional[int] = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


# Multiple-choice head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


lowerCAmelCase__ : int = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


# Next-sentence-prediction head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


lowerCAmelCase__ : Optional[int] = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


# Image-classification head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


lowerCAmelCase__ : List[Any] = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


# Vision-to-text head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


lowerCAmelCase__ : Union[str, Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')


# Speech sequence-to-sequence head.
class snake_case ( _BaseAutoModelClass ):
    """simple docstring"""

    snake_case__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


lowerCAmelCase__ : Optional[int] = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 632 | """simple docstring"""
# Lazy import shim for the TAPAS model family: heavy torch/TF modules are
# only imported when their symbols are actually accessed (or when a static
# type checker is running).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Import structure consumed by _LazyModule at the bottom of the file.
# NOTE(review): it is read below as `_import_structure`, but the obfuscated
# assignment binds it to `lowerCAmelCase__` — confirm the original binding.
lowerCAmelCase__ : int = {
    'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
    'tokenization_tapas': ['TapasTokenizer'],
}

# Register the PyTorch symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : Union[str, Any] = [
        'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TapasForMaskedLM',
        'TapasForQuestionAnswering',
        'TapasForSequenceClassification',
        'TapasModel',
        'TapasPreTrainedModel',
        'load_tf_weights_in_tapas',
    ]

# Register the TensorFlow symbols only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : Optional[Any] = [
        'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFTapasForMaskedLM',
        'TFTapasForQuestionAnswering',
        'TFTapasForSequenceClassification',
        'TFTapasModel',
        'TFTapasPreTrainedModel',
    ]

# Under a type checker, perform the real (eager) imports so annotations
# resolve; at runtime, install the lazy module proxy instead.
if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    lowerCAmelCase__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632 | 1 |
"""simple docstring"""
import math
def a_(lowerCamelCase):
    """Return True when ``lowerCamelCase`` is of the form 2**k * (2**k - 1).

    Solving 4*m + 1 == (2**(k + 1) - 1)**2 for k gives
    k = log2(sqrt(4*m + 1) / 2 + 1 / 2); the partition count is "perfect"
    exactly when that exponent is a whole number.

    :param lowerCamelCase: positive integer to test.
    :return: bool — whether the exponent is integral.

    Fixes: the original body read the undefined name ``positive_integer``
    and compared the exponent against ``int()`` of the *argument* instead
    of ``int()`` of the exponent.
    """
    exponent = math.log2(math.sqrt(4 * lowerCamelCase + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def a_(lowerCamelCase=1 / 1_2_3_4_5):
    """Return the first partition count at which the running proportion of
    "perfect" partition counts drops below ``lowerCamelCase``.

    Candidates are (i**2 - 1) / 4 for i = 3, 4, ...; only integral candidates
    count. A candidate is perfect when it equals 2**k * (2**k - 1).

    :param lowerCamelCase: maximum allowed proportion (default 1/12345).
    :return: int — the partition count at which the threshold is crossed.

    Fixes: the original read the undefined names ``integer``,
    ``partition_candidate`` and ``check_partition_perfect`` (all bindings
    were obfuscated away), and the module guard printed the undefined name
    ``solution``.
    """

    def _is_perfect(candidate):
        # candidate is perfect iff it equals 2**k * (2**k - 1) for integer k.
        exponent = math.log2(math.sqrt(4 * candidate + 1) / 2 + 1 / 2)
        return exponent == int(exponent)

    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # Only integral candidates count as partitions for some k.
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if _is_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0 and perfect_partitions / total_partitions < lowerCamelCase:
            return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    # The original printed the undefined name ``solution``.
    print(f"{a_() = }")
| 632 | """simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# Slow-suite flag placeholder (the fast test class below is intentionally
# empty; only the GPU-gated slow test exercises the pipeline).
lowerCAmelCase__ : Optional[int] = False


class snake_case ( unittest.TestCase ):
    """simple docstring"""

    # Placeholder for the fast (non-slow) test suite — no fast tests exist.
    pass


@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """simple docstring"""

    # End-to-end smoke test: run the image-variation pipeline on a fixed
    # image/seed and compare a 3x3 output patch against golden values.
    # NOTE(review): results are assigned to the obfuscated name
    # `UpperCAmelCase__` but read back as `pipe`/`image`/`image_slice`/
    # `expected_slice` — those bindings do not exist in this copy.
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe(
            image=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 632 | 1 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def a_(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Compute ROUGE between predictions and references (one example per line).

    :param pred_path: file with one model prediction per line.
    :param tgt_path: file with one reference per line; truncated to the
        number of predictions.
    :param save_path: optional path where the metrics dict is dumped as JSON.
    :param rouge_kwargs: forwarded to ``calculate_rouge``.
    :return: the metrics dict (prints nicely from the CLI).

    Fixes: the three parameters shared a single name (a SyntaxError), the
    two file handles were opened but never closed, and the body read the
    undefined names ``save_path``/``metrics``.
    """
    with open(pred_path) as pred_file:
        pred_lns = [x.strip() for x in pred_file.readlines()]
    with open(tgt_path) as tgt_file:
        # Keep only as many references as there are predictions.
        tgt_lns = [x.strip() for x in tgt_file.readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    # The original referenced the pre-obfuscation name `calculate_rouge_path`.
    fire.Fire(a_)
| 632 | """simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def a_(lowerCamelCase=2_0_0_0_0_0_0):
    """Project Euler 85: find the area of the rectangular grid whose number
    of contained (sub-)rectangles is closest to ``lowerCamelCase``.

    An a x b grid contains T(a) * T(b) rectangles, where T is the triangle
    number; for each candidate side a we estimate b from the quadratic
    formula and test floor/ceil of the estimate.

    :param lowerCamelCase: target rectangle count (default 2,000,000).
    :return: int — area a * b of the best grid.

    Fixes: the original body read a dozen undefined names (``target``,
    ``triangle_numbers``, ``best_product``, ``b_floor`` ... — all bindings
    were obfuscated away) and the module guard printed the undefined name
    ``solution``.
    """
    # Triangle numbers T(0..m), sized generously past sqrt(2 * target).
    triangle_numbers = [0]
    for idx in range(1, ceil(sqrt(lowerCamelCase * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # Best rectangle count seen so far and the area that produced it.
    best_product = 0
    area = 0
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # Estimate of b from T(a) * T(b) ~= target via the quadratic formula.
        b_estimate = (-1 + sqrt(1 + 8 * lowerCamelCase / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(lowerCamelCase - triangle_b_first_guess * triangle_a) < abs(
            lowerCamelCase - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(lowerCamelCase - triangle_b_second_guess * triangle_a) < abs(
            lowerCamelCase - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area


if __name__ == "__main__":
    # The original printed the undefined name ``solution``.
    print(f"{a_() = }")
| 632 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Script configuration: label dir, image dir, output dir and flip axis.
# NOTE(review): these were constants (LABEL_DIR / IMG_DIR / OUTPUT_DIR /
# FLIP_TYPE) before obfuscation; the code below reads those names, which
# are no longer bound here. They must be filled in before running.
lowerCAmelCase__ : Optional[int] = ''
lowerCAmelCase__ : List[Any] = ''
lowerCAmelCase__ : List[str] = ''
lowerCAmelCase__ : List[Any] = 1  # (0 is vertical, 1 is horizontal)


# Entry point: load the dataset, flip every image + its YOLO boxes, and
# write the augmented images and annotation files under OUTPUT_DIR.
# NOTE(review): almost every name read here (get_dataset, paths, new_annos,
# file_name, file_root, annos_list, ...) was an obfuscated binding; the
# f-string paths f'''/{file_root}.jpg''' also lost their directory prefix —
# confirm against the original augmentation script.
def a_ ( ):
    UpperCAmelCase__ , UpperCAmelCase__ = get_dataset(lowerCamelCase , lowerCamelCase )
    print('Processing...' )
    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = update_image_and_anno(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    for index, image in enumerate(lowerCamelCase ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        UpperCAmelCase__ = random_chars(3_2 )
        UpperCAmelCase__ = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        UpperCAmelCase__ = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f'''Success {index+1}/{len(lowerCamelCase )} with {file_name}''' )
        UpperCAmelCase__ = []
        for anno in new_annos[index]:
            UpperCAmelCase__ = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(lowerCamelCase )
        with open(f'''/{file_root}.txt''' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def a_(label_dir, img_dir):
    """Collect YOLO-style annotations and matching image paths.

    :param label_dir: directory containing ``*.txt`` label files, each line
        being ``class x_center y_center width height``.
    :param img_dir: directory holding the matching ``<name>.jpg`` images.
    :return: tuple ``(img_paths, labels)`` where ``labels[i]`` is the list
        of ``[class_id, x, y, w, h]`` boxes for ``img_paths[i]``. Label
        files with no boxes are skipped.

    Fixes: the two parameters shared a single name (a SyntaxError), the
    body read undefined names (``label_name``, ``in_file``, ``obj_lists``,
    ``obj``, ``boxes``, ...), and the label file handle was never closed —
    it is now managed by a ``with`` block.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        # The image shares the label file's stem.
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def a_(img_list, anno_list, flip_type=1):
    """Flip every image and mirror its normalized YOLO boxes accordingly.

    :param img_list: list of image file paths (read with ``cva.imread``).
    :param anno_list: per-image lists of ``[class_id, x, y, w, h]`` boxes
        with coordinates normalized to [0, 1].
    :param flip_type: 1 = horizontal flip (mirror x centres),
        0 = vertical flip (mirror y centres) — the ``cva.flip`` convention.
    :return: tuple ``(new_imgs_list, new_annos_lists, path_list)``.

    Fixes: the three parameters shared a single name (a SyntaxError) and
    the body read undefined names (``img_list``, ``path_list``, ``bbox``
    lists, ...). NOTE(review): for any ``flip_type`` other than 0/1 the
    flipped image is undefined — same as the original control flow.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            # Horizontal flip: x centres are mirrored about the midline.
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            # Vertical flip: y centres are mirrored about the midline.
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def a_(lowerCamelCase=3_2):
    """Return a random string of ``lowerCamelCase`` lowercase letters/digits.

    :param lowerCamelCase: number of characters to generate (must be > 1).
    :return: str of that length drawn from ``ascii_lowercase + digits``.

    Fixes: the original asserted on the undefined name ``number_char`` and
    drew each character from the *integer* argument instead of from the
    alphabet, then ranged over the alphabet instead of the count.
    """
    assert lowerCamelCase > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(lowerCamelCase))
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the entry point
    # above was renamed to `a_` by obfuscation, so running this script
    # as-is raises NameError.
    main()
    print('DONE ✅')
| 632 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[Any] = ['model.decoder.embed_positions.weights']
def a_(lowerCamelCase):
    """Map one fairseq MusicGen state-dict key to its transformers name.

    Substitutions are applied in sequence so that compound keys (e.g.
    ``transformer.layers.0.linear1``) are fully rewritten.

    :param lowerCamelCase: original fairseq key.
    :return: the renamed key (unchanged if no pattern matches).

    Fixes: every branch read and wrote the undefined name ``name`` instead
    of the parameter (all bindings were obfuscated away), so no
    substitution ever chained.
    """
    name = lowerCamelCase
    if "emb" in name:
        name = name.replace('emb', 'model.decoder.embed_tokens')
    if "transformer" in name:
        name = name.replace('transformer', 'model.decoder')
    if "cross_attention" in name:
        name = name.replace('cross_attention', 'encoder_attn')
    if "linear1" in name:
        name = name.replace('linear1', 'fc1')
    if "linear2" in name:
        name = name.replace('linear2', 'fc2')
    if "norm1" in name:
        name = name.replace('norm1', 'self_attn_layer_norm')
    if "norm_cross" in name:
        name = name.replace('norm_cross', 'encoder_attn_layer_norm')
    if "norm2" in name:
        name = name.replace('norm2', 'final_layer_norm')
    if "out_norm" in name:
        name = name.replace('out_norm', 'model.decoder.layer_norm')
    if "linears" in name:
        name = name.replace('linears', 'lm_heads')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj', 'enc_to_dec_proj')
    return name
def a_(state_dict, hidden_size):
    """Rename all keys of a fairseq MusicGen state dict in place.

    Fused ``in_proj_weight`` tensors are split into q/k/v projections of
    ``hidden_size`` rows each, and ``enc_to_dec_proj`` weights are routed
    into a separate dict for the composite model.

    :param state_dict: mutable mapping of fairseq keys to weight tensors.
    :param hidden_size: decoder hidden size used to split the fused qkv.
    :return: tuple ``(state_dict, enc_dec_proj_state_dict)``.

    Fixes: the two parameters shared a single name (a SyntaxError) and the
    loop body read undefined names (``keys``, ``val``, ``key``).
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        # NOTE(review): `rename_keys` is the per-key mapper defined just
        # above (its definition was renamed to `a_` by obfuscation) —
        # confirm the binding before running.
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight', 'q_proj.weight')] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight', 'k_proj.weight')] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight', 'v_proj.weight')] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.') :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def a_(checkpoint):
    """Build a MusicgenDecoderConfig for one of the released checkpoint sizes.

    :param checkpoint: one of ``"small"``, ``"medium"``, ``"large"``.
    :return: a ``MusicgenDecoderConfig`` with the matching dimensions
        (ffn dim is always 4x the hidden size).
    :raises ValueError: for any other checkpoint name.

    Fixes: every branch compared/assigned undefined names (``checkpoint``,
    ``hidden_size``, ``num_hidden_layers``, ``num_attention_heads`` — all
    bindings were obfuscated away).
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    return MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
# Convert a fairseq MusicGen checkpoint into a composite transformers
# MusicgenForConditionalGeneration (T5 text encoder + Encodec audio
# encoder + Musicgen decoder), sanity-check a forward pass, build the
# processor, and optionally save/push the result.
# NOTE(review): the four parameters all share the name `lowerCamelCase`
# (a SyntaxError) and most locals read here (fairseq_model, decoder_config,
# decoder, missing_keys, unexpected_keys, model, processor, input_ids,
# logits, audio_encoder, pytorch_dump_folder, checkpoint, repo_id, ...)
# were bound to obfuscated names — this function cannot run as-is.
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="cpu" ):
    UpperCAmelCase__ = MusicGen.get_pretrained(lowerCamelCase , device=lowerCamelCase )
    UpperCAmelCase__ = decoder_config_from_checkpoint(lowerCamelCase )
    UpperCAmelCase__ = fairseq_model.lm.state_dict()
    UpperCAmelCase__ , UpperCAmelCase__ = rename_state_dict(
        lowerCamelCase , hidden_size=decoder_config.hidden_size )
    UpperCAmelCase__ = TaEncoderModel.from_pretrained('t5-base' )
    UpperCAmelCase__ = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    UpperCAmelCase__ = MusicgenForCausalLM(lowerCamelCase ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    UpperCAmelCase__ , UpperCAmelCase__ = decoder.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(lowerCamelCase )
    if len(lowerCamelCase ) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(lowerCamelCase ) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    UpperCAmelCase__ = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase , audio_encoder=lowerCamelCase , decoder=lowerCamelCase )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(lowerCamelCase )
    # check we can do a forward pass
    UpperCAmelCase__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    UpperCAmelCase__ = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        UpperCAmelCase__ = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase ).logits
    if logits.shape != (8, 1, 2_0_4_8):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    UpperCAmelCase__ = AutoTokenizer.from_pretrained('t5-base' )
    UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    UpperCAmelCase__ = MusicgenProcessor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
    # set the appropriate bos/pad token ids
    UpperCAmelCase__ = 2_0_4_8
    UpperCAmelCase__ = 2_0_4_8
    # set other default generation config params
    UpperCAmelCase__ = int(3_0 * audio_encoder.config.frame_rate )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = 3.0
    if pytorch_dump_folder is not None:
        Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(lowerCamelCase )
        processor.save_pretrained(lowerCamelCase )
    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(lowerCamelCase )
        processor.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
    # CLI wrapper around the conversion function above.
    # NOTE(review): `parser`/`args` are read below but bound to obfuscated
    # names, and `convert_musicgen_checkpoint` was renamed to `a_` — the
    # script cannot run as-is.
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint',
        default='small',
        type=str,
        help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
    )
    parser.add_argument(
        '--pytorch_dump_folder',
        required=True,
        default=None,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    parser.add_argument(
        '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
    )
    lowerCAmelCase__ : List[str] = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 632 | 1 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
# Convert an original (CompVis-style) latent-diffusion checkpoint into a
# diffusers LDMPipeline: split the combined state dict into VQVAE and UNet
# parts, rebuild both models plus a DDIM scheduler from the OmegaConf
# config, and save the assembled pipeline.
# NOTE(review): the three parameters share one name (a SyntaxError) and
# the body reads obfuscated-away bindings (state_dict, keys, config,
# vqvae, unet, scheduler, pipeline, the two prefix strings, ...) — this
# function cannot run as-is; confirm against the upstream diffusers script.
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    UpperCAmelCase__ = OmegaConf.load(lowerCamelCase )
    UpperCAmelCase__ = torch.load(lowerCamelCase , map_location='cpu' )['model']
    UpperCAmelCase__ = list(state_dict.keys() )
    # extract state_dict for VQVAE
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = 'first_stage_model.'
    for key in keys:
        if key.startswith(lowerCamelCase ):
            UpperCAmelCase__ = state_dict[key]
    # extract state_dict for UNetLDM
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(lowerCamelCase ):
            UpperCAmelCase__ = state_dict[key]
    UpperCAmelCase__ = config.model.params.first_stage_config.params
    UpperCAmelCase__ = config.model.params.unet_config.params
    UpperCAmelCase__ = VQModel(**lowerCamelCase ).eval()
    vqvae.load_state_dict(lowerCamelCase )
    UpperCAmelCase__ = UNetLDMModel(**lowerCamelCase ).eval()
    unet.load_state_dict(lowerCamelCase )
    UpperCAmelCase__ = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=lowerCamelCase , )
    UpperCAmelCase__ = LDMPipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    pipeline.save_pretrained(lowerCamelCase )


if __name__ == "__main__":
    # NOTE(review): `parser`/`args` are read but bound to obfuscated names,
    # and `convert_ldm_original` was renamed to `a_`.
    lowerCAmelCase__ : Dict = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    lowerCAmelCase__ : Optional[Any] = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 632 | """simple docstring"""
# Shared state for the digit-sum sequence search (Project Euler 551 style):
# ks — the digit positions at which jumps are cached;
# base — powers of ten for positional arithmetic;
# memo — cache of jumps keyed by digitsum(b), then by the low-digit value c.
# NOTE(review): these were named ks / base / memo before obfuscation; the
# functions below read those names, which are no longer bound here.
lowerCAmelCase__ : Tuple = range(2, 20 + 1)
lowerCAmelCase__ : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ : dict[int, dict[int, list[list[int]]]] = {}


# next_term(a_i, k, i, n): advance the sequence from term i toward term n
# using cached "jumps" over the top digits (b) while the low k digits (c)
# stay representable, recursing to smaller k or falling back to `compute`
# for sequential terms. Returns (diff, terms_jumped).
# NOTE(review): nearly every local read here (sub_memo, jumps, max_dn,
# max_jump, diff, dn, c, new_c, _diff, terms_jumped, j, ...) was bound to
# an obfuscated name — the nesting below mirrors the original algorithm
# but the function cannot run as-is.
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    UpperCAmelCase__ = sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
    UpperCAmelCase__ = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
    UpperCAmelCase__ , UpperCAmelCase__ = 0, 0
    UpperCAmelCase__ = n - i
    UpperCAmelCase__ = memo.get(lowerCamelCase )
    if sub_memo is not None:
        UpperCAmelCase__ = sub_memo.get(lowerCamelCase )
        if jumps is not None and len(lowerCamelCase ) > 0:
            # find and make the largest jump without going over
            UpperCAmelCase__ = -1
            for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    UpperCAmelCase__ = _k
                    break
            if max_jump >= 0:
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = jumps[max_jump]
                # since the difference between jumps is cached, add c
                UpperCAmelCase__ = diff + c
                for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
                    UpperCAmelCase__ , UpperCAmelCase__ = divmod(lowerCamelCase , 1_0 )
                if new_c > 0:
                    add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        else:
            UpperCAmelCase__ = []
    else:
        UpperCAmelCase__ = {c: []}
        UpperCAmelCase__ = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            UpperCAmelCase__ , UpperCAmelCase__ = next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        UpperCAmelCase__ , UpperCAmelCase__ = compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
        diff += _diff
        dn += terms_jumped
    UpperCAmelCase__ = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    UpperCAmelCase__ = 0
    while j < len(lowerCamelCase ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
    return (diff, dn)
# compute(a_i, k, i, n): advance the sequence one term at a time from term
# i toward term n, adding the running digit sum into the low k digits and
# stopping once a carry escapes past position k (the caller then jumps).
# Returns (diff, number_of_terms_computed).
# NOTE(review): the four parameters share one name (a SyntaxError) and the
# locals read here (start_i, ds_b, ds_c, diff, addend) were bound to
# obfuscated names — this function cannot run as-is.
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    if i >= n:
        return 0, i
    if k > len(lowerCamelCase ):
        a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    UpperCAmelCase__ = i
    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 0, 0, 0
    for j in range(len(lowerCamelCase ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        UpperCAmelCase__ = ds_c + ds_b
        diff += addend
        UpperCAmelCase__ = 0
        for j in range(lowerCamelCase ):
            UpperCAmelCase__ = a_i[j] + addend
            UpperCAmelCase__ , UpperCAmelCase__ = divmod(lowerCamelCase , 1_0 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    return diff, i - start_i
def a_(digits, k, addend):
    """Add ``addend * 10**k`` into a little-endian decimal digit list.

    Carries are propagated in place starting at digit index ``k``; any
    overflow past the current length is appended as new high digits.

    :param digits: mutable list of digits, least-significant first.
    :param k: index at which to start adding.
    :param addend: non-negative integer to add in at that position.

    Fixes: the three parameters shared a single name (a SyntaxError) and
    the body read undefined names (``digits``, ``s``, ``quotient``,
    ``addend`` — all bindings were obfuscated away).
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 1_0:
            # Keep the low digit, push the carry into the next addend.
            quotient, digits[j] = divmod(s, 1_0)
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0
        if addend == 0:
            break
    # Whatever is left spills into new most-significant digits.
    while addend > 0:
        addend, digit = divmod(addend, 1_0)
        digits.append(digit)
# solution(n): compute the n-th term of the digit-sum sequence
# (a_{k+1} = a_k + digitsum(a_k), a_1 = 1) using the jump cache above,
# then fold the little-endian digit list back into an integer.
# NOTE(review): `next_term`, `digits`, `dn`, `a_n` are read but their
# bindings were obfuscated away, and the guard prints the undefined name
# `solution` — this cannot run as-is.
def a_ ( lowerCamelCase = 1_0**1_5 ):
    UpperCAmelCase__ = [1]
    UpperCAmelCase__ = 1
    UpperCAmelCase__ = 0
    while True:
        UpperCAmelCase__ , UpperCAmelCase__ = next_term(lowerCamelCase , 2_0 , i + dn , lowerCamelCase )
        dn += terms_jumped
        if dn == n - i:
            break
    UpperCAmelCase__ = 0
    for j in range(len(lowerCamelCase ) ):
        a_n += digits[j] * 1_0**j
    return a_n


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 632 | 1 |
"""simple docstring"""
# Lazy import shim for the SpeechT5 model family: torch / sentencepiece
# modules are only imported on first symbol access (or for type checkers).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

# Import structure consumed by _LazyModule at the bottom of the file.
# NOTE(review): it is read below as `_import_structure`, but the obfuscated
# assignment binds it to `lowerCAmelCase__` — confirm the original binding.
lowerCAmelCase__ : int = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

# Tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : List[Any] = ['SpeechT5Tokenizer']

# Model classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : Optional[Any] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

# Under a type checker, perform the real (eager) imports; at runtime,
# install the lazy module proxy instead.
if TYPE_CHECKING:
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632 | """simple docstring"""
import random
class snake_case:
    """A toy one-time-pad-style character cipher.

    ``encrypt`` maps each character ``c`` with a fresh random key
    ``k in [1, 300]`` to ``(ord(c) + k) * k``; ``decrypt`` inverts it via
    ``(cipher - k**2) / k``.

    Fixes: both static methods were defined under the same (obfuscated)
    identifier, so the encrypt implementation was silently overwritten;
    the bodies read undefined names (``text``, ``plain``, ``cipher``,
    ``key``), and the module guard was a SyntaxError
    (``a , b : Dict = ...``) that also instantiated the undefined name
    ``Onepad``.
    """

    @staticmethod
    def encrypt(text):
        """Return ``(cipher, key)`` lists for ``text``."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        """Invert :meth:`encrypt` given the cipher values and their keys."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = snake_case.encrypt('Hello')
    print(c, k)
    print(snake_case.decrypt(c, k))
| 632 | 1 |
"""simple docstring"""
def a_(lowerCamelCase=1_0_0_0_0_0_0):
    """Project Euler 135: count n < limit with exactly ten solutions of
    x**2 - y**2 - z**2 == n where x > y > z > 0 form a decreasing
    arithmetic progression.

    With y = a and common difference d, the equation reduces to
    n = a * (4d - a); for each a we scan its multiples n and recover
    d = (a + n/a) / 4, which must be a positive integer with a > d and
    a < 4d.

    :param lowerCamelCase: exclusive upper bound for n (default 1,000,000).
    :return: int — how many n have exactly ten solutions.

    Fixes: the original body read undefined names (``limit``,
    ``frequency``, ``common_difference``, ``count`` — all bindings were
    obfuscated away) and the module guard printed the undefined name
    ``solution``.
    """
    limit = lowerCamelCase + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisble by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 1_0)
    return count


if __name__ == "__main__":
    # The original printed the undefined name ``solution``.
    print(f"{a_() = }")
| 632 | """simple docstring"""
import re
def a_(lowerCamelCase):
    """Split ``lowerCamelCase`` on punctuation, then each fragment on
    whitespace, returning a list of word lists.

    :param lowerCamelCase: input string.
    :return: list of lists of words.

    Fixes: the original body read the undefined name ``str_`` instead of
    the parameter.
    """
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', lowerCamelCase)]
def a_(lowerCamelCase):
    """PascalCase-join a string: split on punctuation and whitespace,
    capitalize each word, and concatenate the results.

    :param lowerCamelCase: input string.
    :return: the concatenated capitalized words.

    Fixes: the original read undefined names (``str_``, ``string_split``)
    and called a sibling splitter whose definition was renamed away by
    obfuscation — the splitting is inlined here.
    """
    string_split = [chunk.split() for chunk in re.split(r'[^ a-z A-Z 0-9 \s]', lowerCamelCase)]
    return "".join(
        "".join(word.capitalize() for word in sub_str) for sub_str in string_split
    )
def a_(text, upper, separator):
    """Join the words of ``text`` with ``separator``, upper- or lower-casing
    them according to ``upper``.

    :param text: input string (split on punctuation and whitespace).
    :param upper: truthy → uppercase words, falsy → lowercase words.
    :param separator: string placed between words of each fragment.
    :return: the joined string, or ``"not valid string"`` on IndexError.

    Fixes: the three parameters shared a single name (a SyntaxError) and
    the body read undefined names; the splitting helper's definition was
    renamed away by obfuscation, so it is inlined here.
    """
    try:
        string_split = [chunk.split() for chunk in re.split(r'[^ a-z A-Z 0-9 \s]', text)]
        if upper:
            res_str = ''.join(
                [
                    separator.join([word.upper() for word in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = ''.join(
                [
                    separator.join([word.lower() for word in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"
# Thin wrappers over the two case converters above: pascal, camel, snake
# and kebab case respectively.
# NOTE(review): all four defs reuse the name `a_` (each shadows the
# previous one) and they call `to_simple_case` / `to_complex_case`, whose
# definitions were renamed away by obfuscation — none of these can run
# as-is.
def a_ ( lowerCamelCase ):
    # PascalCase.
    return to_simple_case(lowerCamelCase )


def a_ ( lowerCamelCase ):
    # camelCase: PascalCase with the first character lowered.
    try:
        UpperCAmelCase__ = to_simple_case(lowerCamelCase )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def a_ ( lowerCamelCase , lowerCamelCase ):
    # snake_case.
    return to_complex_case(lowerCamelCase , lowerCamelCase , '_' )


def a_ ( lowerCamelCase , lowerCamelCase ):
    # kebab-case.
    return to_complex_case(lowerCamelCase , lowerCamelCase , '-' )


if __name__ == "__main__":
    __import__('doctest').testmod()
| 632 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
# Tests for accelerate's KwargsHandler dataclasses.
# NOTE(review): the base class and many bindings here were obfuscated —
# `__UpperCAmelCase` is presumably KwargsHandler, the three fields of the
# mock dataclass all share the name `snake_case__` (the originals were
# a / b / c), and several locals (scaler_handler, accelerator, scaler,
# ddp_scaler, model, error_msg, observed_bucket_cap_map) are read but
# bound to obfuscated names — confirm against the upstream accelerate
# test file.
@dataclass
class snake_case ( __UpperCAmelCase ):
    """simple docstring"""

    snake_case__ = 0
    snake_case__ = False
    snake_case__ = 3.0


class snake_case ( unittest.TestCase ):
    """simple docstring"""

    # `to_kwargs` must return only the fields that differ from defaults.
    def __lowerCAmelCase ( self : str ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs() ,{} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() ,{'a': 2} )
        self.assertDictEqual(MockClass(a=2 ,b=lowerCamelCase__ ).to_kwargs() ,{'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 ,c=2.2_5 ).to_kwargs() ,{'a': 2, 'c': 2.2_5} )

    # GradScalerKwargs overrides must reach the underlying torch GradScaler.
    @require_cuda
    def __lowerCAmelCase ( self : str ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        UpperCAmelCase__ = GradScalerKwargs(init_scale=1_024 ,growth_factor=2 )
        AcceleratorState._reset_state()
        UpperCAmelCase__ = Accelerator(mixed_precision='fp16' ,kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        UpperCAmelCase__ = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale ,1_0_2_4.0 )
        self.assertEqual(scaler._growth_factor ,2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor ,0.5 )
        self.assertEqual(scaler._growth_interval ,2_000 )
        self.assertEqual(scaler._enabled ,lowerCamelCase__ )

    # Re-run this file under torchrun so the __main__ block below checks
    # DistributedDataParallelKwargs propagation on multiple GPUs.
    @require_multi_gpu
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(lowerCamelCase__ ,env=os.environ.copy() )


if __name__ == "__main__":
    lowerCAmelCase__ : Tuple = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    lowerCAmelCase__ : Any = Accelerator(kwargs_handlers=[ddp_scaler])
    lowerCAmelCase__ : Union[str, Any] = torch.nn.Linear(100, 200)
    lowerCAmelCase__ : List[Any] = accelerator.prepare(model)
    # Check the values changed in kwargs
    lowerCAmelCase__ : List[str] = ''
    lowerCAmelCase__ : Union[str, Any] = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 632 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case ( unittest.TestCase ):
    """SamProcessor tests on the PyTorch path: save/load round-trips, feature
    parity with the bare image processor, and `post_process_masks`.

    NOTE(review): many assignments target obfuscated names while later lines
    read the upstream names (`processor`, `image_processor`, `masks`, ...) —
    confirm against the original test file.
    """

    def __lowerCAmelCase ( self : Optional[Any] ):
        # setUp: persist a default SamProcessor into a fresh temp dir.
        UpperCAmelCase__ = tempfile.mkdtemp()
        UpperCAmelCase__ = SamImageProcessor()
        UpperCAmelCase__ = SamProcessor(lowerCamelCase__ )
        processor.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Dict ):
        # Reload the saved processor's image processor, forwarding any overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor

    def __lowerCAmelCase ( self : Optional[int] ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __lowerCAmelCase ( self : Dict ):
        # Build one random PIL image; moveaxis converts CHW -> HWC for PIL.
        UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
        UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def __lowerCAmelCase ( self : Optional[Any] ):
        # save_pretrained/from_pretrained round-trip with kwargs overrides.
        UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase__ ,padding_value=1.0 )
        UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase__ ,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Optional[int] ):
        # Processor output should match the bare image processor's features
        # (modulo the two size entries the processor pops).
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
        UpperCAmelCase__ = self.prepare_image_inputs()
        UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='np' )
        UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='np' )
        input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )

    @require_torch
    def __lowerCAmelCase ( self : Dict ):
        # post_process_masks accepts python lists, torch tensors and numpy
        # arrays for the size arguments, and raises on malformed sizes.
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
        UpperCAmelCase__ = [torch.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = [[1_764, 2_646]]
        UpperCAmelCase__ = [[683, 1_024]]
        UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
        self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
        UpperCAmelCase__ = processor.post_process_masks(
            lowerCamelCase__ ,torch.tensor(lowerCamelCase__ ) ,torch.tensor(lowerCamelCase__ ) )
        self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
        # should also work with np
        UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) )
        self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
        UpperCAmelCase__ = [[1, 0], [0, 1]]
        with self.assertRaises(lowerCamelCase__ ):
            UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) )
@require_vision
@require_tf
class snake_case ( unittest.TestCase ):
    """SamProcessor tests on the TensorFlow path: mirrors the PyTorch suite but
    uses `return_tensors='tf'` and expects `tf.errors.InvalidArgumentError` on
    malformed sizes.

    NOTE(review): obfuscated assignment targets vs. upstream reads — confirm
    against the original test file.
    """

    def __lowerCAmelCase ( self : List[str] ):
        # setUp: persist a default SamProcessor into a fresh temp dir.
        UpperCAmelCase__ = tempfile.mkdtemp()
        UpperCAmelCase__ = SamImageProcessor()
        UpperCAmelCase__ = SamProcessor(lowerCamelCase__ )
        processor.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Union[str, Any] ):
        # Reload the saved processor's image processor, forwarding any overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor

    def __lowerCAmelCase ( self : List[Any] ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __lowerCAmelCase ( self : Any ):
        # Build one random PIL image; moveaxis converts CHW -> HWC for PIL.
        UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
        UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def __lowerCAmelCase ( self : Optional[int] ):
        # save_pretrained/from_pretrained round-trip with kwargs overrides.
        UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase__ ,padding_value=1.0 )
        UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase__ ,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Processor output should match the bare image processor's features.
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
        UpperCAmelCase__ = self.prepare_image_inputs()
        UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='np' )
        UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='np' )
        input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )

    @require_tf
    def __lowerCAmelCase ( self : Tuple ):
        # post_process_masks with tf tensors, python lists and numpy arrays;
        # the malformed-size error path raises tf's InvalidArgumentError.
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
        UpperCAmelCase__ = [tf.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = [[1_764, 2_646]]
        UpperCAmelCase__ = [[683, 1_024]]
        UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='tf' )
        self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
        UpperCAmelCase__ = processor.post_process_masks(
            lowerCamelCase__ ,tf.convert_to_tensor(lowerCamelCase__ ) ,tf.convert_to_tensor(lowerCamelCase__ ) ,return_tensors='tf' ,)
        self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
        # should also work with np
        UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = processor.post_process_masks(
            lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ,return_tensors='tf' )
        self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
        UpperCAmelCase__ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            UpperCAmelCase__ = processor.post_process_masks(
                lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ,return_tensors='tf' )
@require_vision
@require_torchvision
class snake_case ( unittest.TestCase ):
    """Cross-framework equivalence tests: PyTorch and TensorFlow paths of
    SamProcessor must produce identical mask post-processing and identical
    `pixel_values`.

    NOTE(review): obfuscated assignment targets vs. upstream reads — confirm
    against the original test file.
    """

    def __lowerCAmelCase ( self : Any ):
        # setUp: persist a default SamProcessor into a fresh temp dir.
        UpperCAmelCase__ = tempfile.mkdtemp()
        UpperCAmelCase__ = SamImageProcessor()
        UpperCAmelCase__ = SamProcessor(lowerCamelCase__ )
        processor.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self : Dict ,**lowerCamelCase__ : Any ):
        # Reload the saved processor's image processor, forwarding any overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor

    def __lowerCAmelCase ( self : Optional[Any] ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __lowerCAmelCase ( self : List[str] ):
        # Build one random PIL image; moveaxis converts CHW -> HWC for PIL.
        UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
        UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def __lowerCAmelCase ( self : List[Any] ):
        # Same random mask fed to both backends must post-process identically.
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
        UpperCAmelCase__ = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa )
        UpperCAmelCase__ = [tf.convert_to_tensor(lowerCamelCase__ )]
        UpperCAmelCase__ = [torch.tensor(lowerCamelCase__ )]
        UpperCAmelCase__ = [[1_764, 2_646]]
        UpperCAmelCase__ = [[683, 1_024]]
        UpperCAmelCase__ = processor.post_process_masks(
            lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='tf' )
        UpperCAmelCase__ = processor.post_process_masks(
            lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def __lowerCAmelCase ( self : Optional[Any] ):
        # pixel_values must agree between pt/tf and processor/image-processor.
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
        UpperCAmelCase__ = self.prepare_image_inputs()
        UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='pt' )['pixel_values'].numpy()
        UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='pt' )['pixel_values'].numpy()
        UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='tf' )['pixel_values'].numpy()
        UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='tf' )['pixel_values'].numpy()
        self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
        self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
        self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
| 632 | 1 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def a_ ( lowerCamelCase ):
    """Mark a callable as experimental.

    Returns a wrapper that emits a ``UserWarning`` on every invocation stating
    that the API may change, then forwards all arguments to the wrapped
    callable unchanged.

    Fixes vs. the previous version: the inner signature used the same name for
    ``*args`` and ``**kwargs`` (a SyntaxError), the message referenced an
    undefined ``fn``, and the warning *category* argument was not a warning
    class.
    """
    @wraps(lowerCamelCase )
    def _inner_fn(*args , **kwargs ):
        # Warn at call time (not at decoration time) so every use is flagged.
        warnings.warn(
            (f'''\'{lowerCamelCase.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return lowerCamelCase(*args , **kwargs )
    return _inner_fn
| 632 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : str = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class snake_case ( __UpperCAmelCase ):
    """Configuration object for the CTRL model.

    Stores vocabulary/layout hyper-parameters and forwards any remaining
    keyword arguments to the base config class.

    NOTE(review): in this dump every ``__init__`` parameter shares the name
    ``lowerCamelCase__`` while the body reads the upstream names
    (``vocab_size``, ``n_positions``, ...) — confirm against the original
    module.
    """

    # model type identifier
    snake_case__ = "ctrl"
    # keys ignored when comparing configs at inference time
    snake_case__ = ["past_key_values"]
    # canonical attribute-name aliases -> CTRL-specific attribute names
    snake_case__ = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : Any ,lowerCamelCase__ : str=246_534 ,lowerCamelCase__ : List[str]=256 ,lowerCamelCase__ : Optional[int]=1_280 ,lowerCamelCase__ : Any=8_192 ,lowerCamelCase__ : int=48 ,lowerCamelCase__ : Optional[Any]=16 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[str]=1e-6 ,lowerCamelCase__ : List[str]=0.0_2 ,lowerCamelCase__ : Tuple=True ,**lowerCamelCase__ : Optional[Any] ,):
        # Plain attribute copies of the hyper-parameters; leftover kwargs go to
        # the parent config class.
        UpperCAmelCase__ = vocab_size
        UpperCAmelCase__ = n_positions
        UpperCAmelCase__ = n_embd
        UpperCAmelCase__ = n_layer
        UpperCAmelCase__ = n_head
        UpperCAmelCase__ = dff
        UpperCAmelCase__ = resid_pdrop
        UpperCAmelCase__ = embd_pdrop
        UpperCAmelCase__ = layer_norm_epsilon
        UpperCAmelCase__ = initializer_range
        UpperCAmelCase__ = use_cache
        super().__init__(**lowerCamelCase__ )
| 632 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class snake_case ( __UpperCAmelCase ):
    """Output container returned by the prior transformer's forward pass.

    NOTE(review): upstream this is an annotated tensor field
    (``predicted_image_embedding: torch.FloatTensor``); it appears here as a
    plain class attribute — confirm against the original module.
    """

    snake_case__ = 42
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
    """Transformer that maps (timestep, hidden states, projected embeddings,
    optional encoder hidden states) to a predicted embedding wrapped in the
    output dataclass above.

    NOTE(review): method parameters share one obfuscated name in this dump
    while the bodies read the upstream names (``num_attention_heads``,
    ``timestep``, ``proj_embedding``, ...) — confirm against the original
    module before relying on any single line.
    """

    @register_to_config
    def __init__( self : Any ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 64 ,lowerCamelCase__ : int = 20 ,lowerCamelCase__ : int = 768 ,lowerCamelCase__ : Optional[Any]=77 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : str = "silu" ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = "linear" ,lowerCamelCase__ : Optional[str] = "prd" ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,):
        # Builds the time embedding, input/output projections, the stack of
        # BasicTransformerBlock layers, the learned positional embedding, the
        # optional "prd" token, and a causal attention mask buffer.
        super().__init__()
        UpperCAmelCase__ = num_attention_heads
        UpperCAmelCase__ = attention_head_dim
        UpperCAmelCase__ = num_attention_heads * attention_head_dim
        UpperCAmelCase__ = additional_embeddings
        UpperCAmelCase__ = time_embed_dim or inner_dim
        UpperCAmelCase__ = embedding_proj_dim or embedding_dim
        UpperCAmelCase__ = clip_embed_dim or embedding_dim
        UpperCAmelCase__ = Timesteps(lowerCamelCase__ ,lowerCamelCase__ ,0 )
        UpperCAmelCase__ = TimestepEmbedding(lowerCamelCase__ ,lowerCamelCase__ ,out_dim=lowerCamelCase__ ,act_fn=lowerCamelCase__ )
        UpperCAmelCase__ = nn.Linear(lowerCamelCase__ ,lowerCamelCase__ )
        if embedding_proj_norm_type is None:
            UpperCAmelCase__ = None
        elif embedding_proj_norm_type == "layer":
            UpperCAmelCase__ = nn.LayerNorm(lowerCamelCase__ )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        UpperCAmelCase__ = nn.Linear(lowerCamelCase__ ,lowerCamelCase__ )
        if encoder_hid_proj_type is None:
            UpperCAmelCase__ = None
        elif encoder_hid_proj_type == "linear":
            UpperCAmelCase__ = nn.Linear(lowerCamelCase__ ,lowerCamelCase__ )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        UpperCAmelCase__ = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,lowerCamelCase__ ) )
        if added_emb_type == "prd":
            UpperCAmelCase__ = nn.Parameter(torch.zeros(1 ,1 ,lowerCamelCase__ ) )
        elif added_emb_type is None:
            UpperCAmelCase__ = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        UpperCAmelCase__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,dropout=lowerCamelCase__ ,activation_fn='gelu' ,attention_bias=lowerCamelCase__ ,)
                for d in range(lowerCamelCase__ )
            ] )
        if norm_in_type == "layer":
            UpperCAmelCase__ = nn.LayerNorm(lowerCamelCase__ )
        elif norm_in_type is None:
            UpperCAmelCase__ = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        UpperCAmelCase__ = nn.LayerNorm(lowerCamelCase__ )
        UpperCAmelCase__ = nn.Linear(lowerCamelCase__ ,lowerCamelCase__ )
        # Upper-triangular mask of large negative values -> causal attention.
        UpperCAmelCase__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
        causal_attention_mask.triu_(1 )
        UpperCAmelCase__ = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' ,lowerCamelCase__ ,persistent=lowerCamelCase__ )
        UpperCAmelCase__ = nn.Parameter(torch.zeros(1 ,lowerCamelCase__ ) )
        UpperCAmelCase__ = nn.Parameter(torch.zeros(1 ,lowerCamelCase__ ) )

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __lowerCAmelCase ( self : Optional[Any] ):
        # Walk all submodules and collect every attention processor, keyed by
        # its dotted module path.
        UpperCAmelCase__ = {}
        def fn_recursive_add_processors(lowerCamelCase__ : str ,lowerCamelCase__ : torch.nn.Module ,lowerCamelCase__ : Dict[str, AttentionProcessor] ):
            if hasattr(lowerCamelCase__ ,'set_processor' ):
                UpperCAmelCase__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' ,lowerCamelCase__ ,lowerCamelCase__ )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
        return processors

    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        # Install a single processor everywhere, or a per-layer dict keyed by
        # dotted module path; the dict must cover every attention layer.
        UpperCAmelCase__ = len(self.attn_processors.keys() )
        if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(lowerCamelCase__ : str ,lowerCamelCase__ : torch.nn.Module ,lowerCamelCase__ : List[str] ):
            if hasattr(lowerCamelCase__ ,'set_processor' ):
                if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
                    module.set_processor(lowerCamelCase__ )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,lowerCamelCase__ ,lowerCamelCase__ )
        for name, module in self.named_children():
            fn_recursive_attn_processor(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : int ):
        # Reset every attention layer to the default processor.
        self.set_attn_processor(AttnProcessor() )

    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Union[torch.Tensor, float, int] ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[torch.BoolTensor] = None ,lowerCamelCase__ : bool = True ,):
        # Forward pass: embed the timestep, project inputs, concatenate the
        # token sequence (encoder states, projected embedding, time embedding,
        # hidden states, optional prd token), add positional embeddings, run
        # the transformer stack and project the final token(s) out.
        UpperCAmelCase__ = hidden_states.shape[0]
        UpperCAmelCase__ = timestep
        if not torch.is_tensor(lowerCamelCase__ ):
            UpperCAmelCase__ = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
        elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
            UpperCAmelCase__ = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        UpperCAmelCase__ = timesteps * torch.ones(lowerCamelCase__ ,dtype=timesteps.dtype ,device=timesteps.device )
        UpperCAmelCase__ = self.time_proj(lowerCamelCase__ )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        UpperCAmelCase__ = timesteps_projected.to(dtype=self.dtype )
        UpperCAmelCase__ = self.time_embedding(lowerCamelCase__ )
        if self.embedding_proj_norm is not None:
            UpperCAmelCase__ = self.embedding_proj_norm(lowerCamelCase__ )
        UpperCAmelCase__ = self.embedding_proj(lowerCamelCase__ )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            UpperCAmelCase__ = self.encoder_hidden_states_proj(lowerCamelCase__ )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        UpperCAmelCase__ = self.proj_in(lowerCamelCase__ )
        UpperCAmelCase__ = self.positional_embedding.to(hidden_states.dtype )
        UpperCAmelCase__ = []
        UpperCAmelCase__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(lowerCamelCase__ )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            UpperCAmelCase__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            UpperCAmelCase__ = hidden_states[:, None, :]
        UpperCAmelCase__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            UpperCAmelCase__ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ ,-1 ,-1 )
            additional_embeds.append(lowerCamelCase__ )
        UpperCAmelCase__ = torch.cat(
            lowerCamelCase__ ,dim=1 ,)
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        UpperCAmelCase__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            UpperCAmelCase__ = F.pad(
                lowerCamelCase__ ,(
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) ,value=0.0 ,)
        UpperCAmelCase__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the boolean mask to additive form and fold in causality.
            UpperCAmelCase__ = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
            UpperCAmelCase__ = F.pad(lowerCamelCase__ ,(0, self.additional_embeddings) ,value=0.0 )
            UpperCAmelCase__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            UpperCAmelCase__ = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
        if self.norm_in is not None:
            UpperCAmelCase__ = self.norm_in(lowerCamelCase__ )
        for block in self.transformer_blocks:
            UpperCAmelCase__ = block(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )
        UpperCAmelCase__ = self.norm_out(lowerCamelCase__ )
        if self.prd_embedding is not None:
            # With a prd token, only the last position carries the prediction.
            UpperCAmelCase__ = hidden_states[:, -1]
        else:
            UpperCAmelCase__ = hidden_states[:, additional_embeddings_len:]
        UpperCAmelCase__ = self.proj_to_clip_embeddings(lowerCamelCase__ )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )

    def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Optional[Any] ):
        # Undo the latent normalization using the learned std/mean parameters.
        UpperCAmelCase__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 632 | """simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase__ : Dict = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def a_ ( ):
    """Ask which compute environment is in use and collect the matching config.

    Returns the configuration produced by the SageMaker questionnaire when the
    user picks AWS, otherwise the cluster questionnaire.

    Fix vs. the previous version: the results of `_ask_options` and the
    questionnaires were assigned to throwaway names while the code read
    `compute_environment` / `config` (NameError at runtime).
    """
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def a_ ( lowerCamelCase=None ):
    """Build the argument parser for `accelerate config`.

    If *lowerCamelCase* (an argparse subparsers action) is given, register a
    `config` subcommand on it; otherwise create a standalone parser. The
    module-level `lowerCAmelCase__` string is used as the description.

    Fix vs. the previous version: `description=` and `--config_file`'s
    `default=` were wired to the subparsers object instead of the description
    constant and `None`.
    """
    if lowerCamelCase is not None:
        parser = lowerCamelCase.add_parser('config' , description=lowerCAmelCase__ )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase__ )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if lowerCamelCase is not None:
        # NOTE(review): upstream wires `func` to the config-command callback;
        # in this dump that sibling is also named `a_` — confirm the target.
        parser.set_defaults(func=lowerCamelCase )
    return parser
def a_ ( lowerCamelCase ):
    """Run `accelerate config`: collect answers, resolve the output path, and
    save the configuration as JSON or YAML depending on the file extension.

    NOTE(review): the body reads `args`, `config_file` and `config`, and calls
    `get_user_input()`, none of which are bound under those names in this dump
    (assignments target obfuscated names, and all siblings are renamed `a_`)
    — confirm against the upstream file before executing.
    """
    UpperCAmelCase__ = get_user_input()
    if args.config_file is not None:
        UpperCAmelCase__ = args.config_file
    else:
        if not os.path.isdir(lowerCamelCase ):
            os.makedirs(lowerCamelCase )
        UpperCAmelCase__ = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(lowerCamelCase )
    else:
        config.to_yaml_file(lowerCamelCase )
    print(f'''accelerate configuration saved at {config_file}''' )
def a_ ( ):
    """CLI entry point: build the parser, parse argv, and run the command.

    Fix vs. the previous version: the parser was assigned to a throwaway name,
    `parser.parse_args()` referenced an unbound `parser`, and the command was
    invoked with an undefined `lowerCamelCase` instead of the parsed args.
    NOTE(review): `config_command_parser` / `config_command` are the upstream
    names of the sibling helpers (all renamed `a_` in this dump) — confirm.
    """
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    # NOTE(review): upstream calls `main()`; in this dump every function was
    # renamed `a_`, so invoke the last-defined one (the CLI entry point above).
    a_()
| 632 | 1 |
"""simple docstring"""
from __future__ import annotations
def a_ ( graph , start ):
    """Iterative depth-first search.

    Parameters: *graph* is an adjacency-list dict mapping a vertex to a list of
    neighbours; *start* is the vertex to begin from. Returns the set of
    vertices reachable from *start*.

    Fix vs. the previous version: both parameters shared one obfuscated name
    (a SyntaxError) and the body read undefined `start`/`graph`, with the
    explored set wrongly seeded from the first argument instead of empty.
    """
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        # (reversed keeps visitation order matching the adjacency lists)
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
# Sample adjacency list used by the demo below. (The stray `Dict` annotation
# was dropped: `typing.Dict` is not imported in this module.)
lowerCAmelCase__ = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): upstream calls `depth_first_search(G, 'A')`; in this dump
    # the function is named `a_` and the graph `lowerCAmelCase__`.
    print(a_(lowerCAmelCase__, 'A'))
| 632 | """simple docstring"""
def a_ ( x , y ):
    """Return the greatest common divisor of ``x`` and ``y`` (Euclid).

    Fix vs. the previous version: both parameters shared one obfuscated name
    (a SyntaxError), the body read undefined `x`/`y`, and the recursive call
    targeted the undefined name `greatest_common_divisor`.
    """
    return x if y == 0 else a_(y , x % y )
def a_ ( x , y ):
    """Return the least common multiple of ``x`` and ``y``.

    Fix vs. the previous version: duplicate obfuscated parameter names
    (a SyntaxError) and a call to the undefined `greatest_common_divisor`;
    the stdlib `math.gcd` is used instead, imported locally so the function
    stands alone.
    """
    from math import gcd
    return (x * y) // gcd(x , y )
def a_ ( lowerCamelCase = 2_0 ):
    """Smallest positive number evenly divisible by every integer in
    ``1..lowerCamelCase`` (Project Euler problem 5). Defaults to 20.

    Fix vs. the previous version: the loop called the undefined sibling `lcm`;
    the running lcm is now computed in place via `math.gcd`, making the
    function self-contained.
    """
    from math import gcd
    result = 1
    for i in range(2 , lowerCamelCase + 1 ):
        # lcm(result, i) == result * i // gcd(result, i)
        result = result * i // gcd(result , i )
    return result
if __name__ == "__main__":
    # NOTE(review): upstream prints `solution()`; in this dump the solver is
    # named `a_`.
    print(F"""{a_() = }""")
| 632 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase__ : str = logging.get_logger(__name__)
def a_ ( lowerCamelCase ):
    """Normalize video input into a batch: a list of videos, each a list of
    frames. Accepts an already-batched list of lists, a single video (list of
    frames), or a single frame, and wraps accordingly; raises ``ValueError``
    for anything else.

    Fix vs. the previous version: the body read the undefined upstream name
    `videos` instead of the parameter.
    """
    if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(lowerCamelCase[0] , (list, tuple) ) and is_valid_image(lowerCamelCase[0][0] ):
        return lowerCamelCase
    elif isinstance(lowerCamelCase , (list, tuple) ) and is_valid_image(lowerCamelCase[0] ):
        return [lowerCamelCase]
    elif is_valid_image(lowerCamelCase ):
        return [[lowerCamelCase]]
    raise ValueError(f'''Could not make batched video from {lowerCamelCase}''' )
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["pixel_values"]
def __init__( self : Any ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,**lowerCamelCase__ : int ,):
    """Store the preprocessing options (resize/crop/rescale/normalize flags,
    sizes, resampling, mean/std) with sensible defaults.

    NOTE(review): all parameters share one obfuscated name (invalid Python)
    while the body reads the upstream names (`size`, `do_resize`, ...) —
    confirm against the original module.
    """
    super().__init__(**lowerCamelCase__ )
    # Defaults: shortest-edge-256 resize and a 224x224 center crop.
    UpperCAmelCase__ = size if size is not None else {'shortest_edge': 256}
    UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
    UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
    UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
    UpperCAmelCase__ = do_resize
    UpperCAmelCase__ = size
    UpperCAmelCase__ = do_center_crop
    UpperCAmelCase__ = crop_size
    UpperCAmelCase__ = resample
    UpperCAmelCase__ = do_rescale
    UpperCAmelCase__ = rescale_factor
    UpperCAmelCase__ = offset
    UpperCAmelCase__ = do_normalize
    UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Optional[Any] ,):
    """Resize an image to either a shortest-edge target or an explicit
    height/width pair.

    NOTE(review): parameters share one obfuscated name while the body reads
    `size` — confirm against the original module.
    """
    UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
    if "shortest_edge" in size:
        # Preserve aspect ratio: scale so the shorter side hits the target.
        UpperCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ ,size['shortest_edge'] ,default_to_square=lowerCamelCase__ )
    elif "height" in size and "width" in size:
        UpperCAmelCase__ = (size['height'], size['width'])
    else:
        raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
    return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Optional[Any] ,):
    """Center-crop an image to the given height/width; the size dict must
    contain both keys.

    NOTE(review): duplicated obfuscated parameter names; body reads `size` —
    confirm against the original module.
    """
    UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
    if "height" not in size or "width" not in size:
        raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
    return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[str] ,):
    """Rescale pixel values, optionally shifting by half the scale first
    (the `offset` path) before multiplying.

    NOTE(review): duplicated obfuscated parameter names; body reads
    `offset`/`scale` — confirm against the original module.
    """
    UpperCAmelCase__ = image.astype(np.floataa )
    if offset:
        UpperCAmelCase__ = image - (scale / 2)
    return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : int ,):
    # Thin wrapper over the module-level `normalize` (mean/std normalization).
    # NOTE(review): duplicated obfuscated parameter names — confirm upstream.
    return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST ,):
    """Preprocess a single frame: validate the flag/arg combinations, then
    apply resize -> center-crop -> rescale -> normalize as requested and
    convert to the target channel-dimension layout.

    NOTE(review): all parameters share one obfuscated name while the body
    reads the upstream flag names — confirm against the original module.
    """
    # Validate that every enabled step has the arguments it needs.
    if do_resize and size is None or resample is None:
        raise ValueError('Size and resample must be specified if do_resize is True.' )
    if do_center_crop and crop_size is None:
        raise ValueError('Crop size must be specified if do_center_crop is True.' )
    if do_rescale and rescale_factor is None:
        raise ValueError('Rescale factor must be specified if do_rescale is True.' )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('Image mean and std must be specified if do_normalize is True.' )
    if offset and not do_rescale:
        raise ValueError('For offset, do_rescale must also be set to True.' )
    # All transformations expect numpy arrays.
    UpperCAmelCase__ = to_numpy_array(lowerCamelCase__ )
    if do_resize:
        UpperCAmelCase__ = self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ )
    if do_center_crop:
        UpperCAmelCase__ = self.center_crop(lowerCamelCase__ ,size=lowerCamelCase__ )
    if do_rescale:
        UpperCAmelCase__ = self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ,offset=lowerCamelCase__ )
    if do_normalize:
        UpperCAmelCase__ = self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ )
    UpperCAmelCase__ = to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ )
    return image
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : Dict ,):
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = offset if offset is not None else self.offset
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCAmelCase__ = make_batched(lowerCamelCase__ )
UpperCAmelCase__ = [
[
self._preprocess_image(
image=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,do_center_crop=lowerCamelCase__ ,crop_size=lowerCamelCase__ ,do_rescale=lowerCamelCase__ ,rescale_factor=lowerCamelCase__ ,offset=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,image_mean=lowerCamelCase__ ,image_std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,)
for img in video
]
for video in videos
]
UpperCAmelCase__ = {'pixel_values': videos}
return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
| 632 | """simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def a_ ( lowerCamelCase ):
    """Decorator marking the wrapped callable as experimental.

    Emits a ``UserWarning`` naming the function on every call, then delegates.
    ``functools.wraps`` preserves the wrapped function's metadata.
    (Original body referenced an undefined ``fn``, used ``*x, **x`` duplicate
    parameter names — a SyntaxError — and passed the function itself as the
    warning category.)
    """
    @wraps(lowerCamelCase )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (f'''\'{lowerCamelCase.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return lowerCamelCase(*args , **kwargs )

    return _inner_fn
| 632 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase__ : Dict = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase__ : List[Any] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase__ : Optional[int] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """`datasets.Metric` wrapper around the official MAUVE implementation."""

    def __lowerCAmelCase ( self ):
        # Metric metadata: both inputs are flat sequences of strings.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': datasets.Value('string' ,id='sequence' ),
                    'references': datasets.Value('string' ,id='sequence' ),
                } ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ] ,)

    def __lowerCAmelCase ( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets="auto" ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=500 ,featurize_model_name="gpt2-large" ,device_id=-1 ,max_text_length=1_024 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
        """Compute MAUVE between ``predictions`` (P) and ``references`` (Q).

        All optional arguments are forwarded to ``mauve.compute_mauve``.
        """
        # Fix: result must be bound to ``out`` — the original assigned it to a
        # throwaway name and then returned the unbound name ``out``.
        out = compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
| 632 | """simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Constant names restored to the ones the functions below actually reference
# (VALID_INTS at the decode check, COMMON_WORDS in solution, VALID_CHARS here).
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Byte values of 'a'..'z' — the candidate key characters (Euler 59 keys are lowercase).
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
# Byte values considered a plausible plaintext character.
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decode ``ciphertext`` with ``key`` repeated cyclically.

    Returns the decoded string, or ``None`` as soon as any decoded byte is not
    in ``VALID_INTS`` (i.e. the key cannot be correct).
    (Renamed from the mangled ``a_`` to match the ``try_key`` call site below;
    the original duplicated one parameter name, a SyntaxError.)
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Brute-force every 3-letter lowercase key; keep decodings that are fully printable.

    (Renamed to match the ``filter_valid_chars`` call site in ``solution``;
    the key space is ``LOWERCASE_INTS ** 3`` — the original iterated the
    ciphertext itself, which cannot have been intended.)
    """
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate decodings containing ``common_word`` (case-insensitive).

    (Renamed to match the call site in ``solution``; the original signature
    duplicated one parameter name, a SyntaxError.)
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: decrypt the cipher file, return the sum of ASCII codes.

    Narrows the brute-forced candidates by requiring common English words until
    a single decoding remains.  (Renamed to match the ``solution()`` call in the
    main guard; the mangled ``x = 42`` placeholder lines — originally bare type
    annotations — are dropped.)
    """
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8' )
    ciphertext = [int(number) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
    # `{expr = }` f-string debug syntax prints "solution() = <answer>".
    print(F"""{solution() = }""")
| 632 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# Module-level flag; unused in the visible code — presumably gated slow/local
# pipeline tests upstream. TODO confirm before removing.
lowerCAmelCase__ : Union[str, Any] = False


class snake_case ( unittest.TestCase ):
    """Placeholder suite so the module stays importable; real tests are nightly-only below."""

    pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Nightly, GPU-only integration tests for ``VersatileDiffusionPipeline``.

    NOTE(review): local names look machine-mangled — every assignment targets
    ``UpperCAmelCase__`` while later lines read ``pipe``/``generator``/``image``
    etc.  Bindings must be re-verified against the upstream test; code is kept
    byte-identical here and only documented.
    """

    def __lowerCAmelCase ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : Any ):
        # Round-trips the pipeline through save_pretrained/from_pretrained and
        # asserts dual-guided outputs are bit-close for a fixed seed.
        UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase__ )
            UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ ,torch_dtype=torch.floataa )
            pipe.to(lowerCamelCase__ )
            pipe.set_progress_bar_config(disable=lowerCamelCase__ )
            UpperCAmelCase__ = generator.manual_seed(0 )
            UpperCAmelCase__ = pipe.dual_guided(
                prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def __lowerCAmelCase ( self : Optional[int] ):
        # Exercises all three entry points (dual_guided, text_to_image,
        # image_variation) against golden output slices.
        UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = 'cyberpunk 2077'
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        UpperCAmelCase__ = 'A painting of a squirrel eating a burger '
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.text_to_image(
            prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        UpperCAmelCase__ = pipe.image_variation(lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='numpy' ).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 632 | """simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Any = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Tokenizer test suite for ``XLMProphetNetTokenizer`` (SentencePiece fixture based).

    NOTE(review): several identifiers look machine-mangled (assignments to
    ``UpperCAmelCase__`` read back under other names, ``lowerCamelCase__`` used
    where a fixture path/constant is expected). Code kept byte-identical.
    """

    # Mixin configuration: tokenizer class under test, no Rust counterpart,
    # vocab save/load exercised.
    snake_case__ = XLMProphetNetTokenizer
    snake_case__ = False
    snake_case__ = True

    def __lowerCAmelCase ( self : Any ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase__ = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self : Dict ):
        # Token 0 must round-trip to the [PAD] token.
        UpperCAmelCase__ = '[PAD]'
        UpperCAmelCase__ = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )

    def __lowerCAmelCase ( self : List[str] ):
        # Spot-check first/last vocab entries and total size.
        UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'[PAD]' )
        self.assertEqual(vocab_keys[1] ,'[CLS]' )
        self.assertEqual(vocab_keys[-1] ,'j' )
        self.assertEqual(len(lowerCamelCase__ ) ,1_012 )

    def __lowerCAmelCase ( self : List[Any] ):
        self.assertEqual(self.get_tokenizer().vocab_size ,1_012 )

    def __lowerCAmelCase ( self : str ):
        # Full tokenize -> ids -> tokens round trip, including unknown-token
        # substitution ('9' and 'é' fall back to [UNK] on the reverse pass).
        UpperCAmelCase__ = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
        UpperCAmelCase__ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(lowerCamelCase__ ,['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
        UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            lowerCamelCase__ ,[
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] ,)
        UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
        self.assertListEqual(
            lowerCamelCase__ ,[
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] ,)
        UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
        self.assertListEqual(
            lowerCamelCase__ ,[
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '[UNK]',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '[UNK]',
                '.',
            ] ,)

    @cached_property
    def __lowerCAmelCase ( self : Dict ):
        # Heavy hub download; cached so slow tests share one tokenizer instance.
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )

    @slow
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = 'Hello World!'
        UpperCAmelCase__ = [35_389, 6_672, 49, 2]
        self.assertListEqual(lowerCamelCase__ ,self.big_tokenizer.encode(lowerCamelCase__ ) )

    @slow
    def __lowerCAmelCase ( self : List[str] ):
        # Golden encoding pinned to a specific model revision.
        # fmt: off
        UpperCAmelCase__ = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
| 632 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Maps submodule name -> public names it exports; consumed lazily by _LazyModule.
# (The original bound this dict, the torch-only list, and the _LazyModule proxy
# to throwaway names, so `_import_structure` at the bottom was unbound and the
# lazy module was never installed in sys.modules.)
_import_structure = {
    'configuration_trajectory_transformer': [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TrajectoryTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: also expose the modeling objects.
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632 | """simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the anagram signature of ``word``: its characters sorted and joined.

    (Renamed from the mangled ``a_`` to match the ``signature(...)`` call sites
    below; two words are anagrams iff their signatures are equal.)
    """
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return every known word sharing ``my_word``'s signature (itself included).

    Looks up the module-level ``word_by_signature`` index.  (Renamed to match
    the ``anagram(word)`` call sites in the main guard.)
    """
    return word_by_signature[signature(my_word)]
# Load the dictionary once at import time and build the signature index the
# functions above rely on.  (Names restored: ``word_list`` and
# ``word_by_signature`` were assigned to throwaway identifiers but read back
# under these names.)
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # Collect only words that have at least one distinct anagram, then dump the
    # mapping as a Python literal.  (``all_anagrams`` was previously assigned to
    # a throwaway name and therefore unbound at the pformat call.)
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
| 632 | 1 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity |A ∩ B| / |A ∪ B| of two sets (or lists/tuples).

    With ``alternative_union=True`` the denominator is ``len(A) + len(B)``
    instead of the true union size.  Returns ``None`` for unsupported input
    types.  (Renamed to match the ``jaccard_similarity`` call in the demo
    guard; the original duplicated parameter names — a SyntaxError — and its
    isinstance checks compared values against themselves.)
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        # Duplicate-preserving variant: membership tests instead of set ops.
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    # Demo: expected similarity is |{c,d,e}| / |{a..f,h,i}| = 3/8 = 0.375.
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
| 632 | """simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        """Mirror of the Win32 ``CONSOLE_CURSOR_INFO`` struct.

        (Restored: the class name is referenced as ``CursorInfo()`` below, and
        ctypes requires the magic ``_fields_`` attribute — the mangled
        ``snake_case__`` attribute would leave the struct without a layout.)
        """

        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor (Win32 console API on Windows, ANSI escape on POSIX).

    (Renamed to match the ``hide_cursor()`` call in the context manager below;
    ``kernelaa`` restored to the real DLL name ``kernel32``, and the struct's
    ``visible`` flag is actually assigned.)
    """
    if os.name == "nt":
        ci = CursorInfo()
        # -11 is STD_OUTPUT_HANDLE.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor():
    """Make the terminal cursor visible again (inverse of ``hide_cursor``).

    (Renamed to match the ``show_cursor()`` call in the context manager below;
    ``kernel32`` restored and the ``visible`` flag actually assigned.)
    """
    if os.name == "nt":
        ci = CursorInfo()
        # -11 is STD_OUTPUT_HANDLE.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def a_ ( ):
    """Context manager that hides the terminal cursor for the duration of the block."""
    try:
        hide_cursor()
        yield
    finally:
        # Restore the cursor even if the wrapped block raises.
        show_cursor()
| 632 | 1 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    """Return ``sequence[n]`` of the Fibonacci sequence 0, 1, 1, 2, 3, 5, ...

    NOTE: preserved quirks from the original — ``fibonacci(1)`` and any
    non-int input return 0 (so fibonacci(2) == 1, fibonacci(3) == 2, ...).
    Callers below rely on this indexing.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the smallest index whose Fibonacci number has at least ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits.

    (All three functions renamed from the mangled ``a_`` to match their
    existing call sites.)
    """
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Reads the target digit count from stdin and prints the answer.
    print(solution(int(str(input()).strip())))
| 632 | """simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class snake_case ( __UpperCAmelCase ):
    """Property-style descriptor that computes a value once and caches it on the instance.

    (The original ``__get__`` reused one parameter name twice — a SyntaxError —
    and bound its locals to throwaway names while reading ``obj``/``cached``.)
    """

    def __get__( self, obj, objtype=None ):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute' )
        attr = '__cached_' + self.fget.__name__
        # A cached value of None triggers recomputation (original behaviour).
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def a_ ( lowerCamelCase ):
    """Convert a truthy/falsy string to 1/0 (mirrors ``distutils.util.strtobool``).

    Accepts y/yes/t/true/on/1 and n/no/f/false/off/0 (case-insensitive);
    raises ``ValueError`` otherwise.  (Fix: the lowered value was assigned to a
    throwaway name while ``val`` was read back unbound.)
    """
    val = lowerCamelCase.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''' )
def a_ ( lowerCamelCase ):
if is_torch_fx_proxy(lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase ):
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase ):
return _is_numpy(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
return isinstance(lowerCamelCase , torch.Tensor )
def a_ ( lowerCamelCase ):
return False if not is_torch_available() else _is_torch(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
return isinstance(lowerCamelCase , torch.device )
def a_ ( lowerCamelCase ):
return False if not is_torch_available() else _is_torch_device(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
if isinstance(lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase )
else:
return False
return isinstance(lowerCamelCase , torch.dtype )
def a_ ( lowerCamelCase ):
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase )
def a_ ( lowerCamelCase ):
import tensorflow as tf
return isinstance(lowerCamelCase , tf.Tensor )
def a_ ( lowerCamelCase ):
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase )
def a_ ( lowerCamelCase ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowerCamelCase )
return type(lowerCamelCase ) == tf.Tensor
def a_ ( lowerCamelCase ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase )
def a_ ( lowerCamelCase ):
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase , jnp.ndarray )
def a_ ( lowerCamelCase ):
return False if not is_flax_available() else _is_jax(lowerCamelCase )
def to_py_obj(obj):
    """Recursively convert tensors/arrays/containers to plain Python objects.

    (Renamed from the mangled ``a_``; the recursive calls previously passed the
    loop's unused variable instead of each value.)
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Recursively convert tensors/containers to numpy arrays (dicts map per value)."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class snake_case ( __UpperCAmelCase ):
    """Ordered, immutable-ish model-output container (ModelOutput pattern).

    Fields are simultaneously accessible as attributes, dict keys, and tuple
    indices; mutating operations (``pop``/``update``/``__delitem__``/
    ``setdefault``) are disabled.  (Rewritten: the original reused parameter
    names (``*x, **x`` — a SyntaxError) and bound all locals to one throwaway
    name while reading them under their real names.)
    """

    def __lowerCAmelCase ( self ):
        # Post-init consistency pass: validate the dataclass fields and expose
        # the non-None values through the dict interface.
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self ,class_fields[0].name )
        other_fields_are_none = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field ,dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element ,(list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] ,str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self ,element[0] ,element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self ,field.name )
                if v is not None:
                    self[field.name] = v

    def __delitem__( self ,*args ,**kwargs ):
        raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )

    def __lowerCAmelCase ( self ,*args ,**kwargs ):
        # Disabled dict mutator (setdefault).
        raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )

    def __lowerCAmelCase ( self ,*args ,**kwargs ):
        # Disabled dict mutator (pop).
        raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )

    def __lowerCAmelCase ( self ,*args ,**kwargs ):
        # Disabled dict mutator (update).
        raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )

    def __getitem__( self ,k ):
        # String keys behave like dict access; integer/slice keys index the tuple view.
        if isinstance(k ,str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self ,name ,value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name ,value )
        super().__setattr__(name ,value )

    def __setitem__( self ,key ,value ):
        # Will raise a KeyException if needed
        super().__setitem__(key ,value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key ,value )

    def __lowerCAmelCase ( self ):
        # Tuple view of all present values, in key order.
        return tuple(self[k] for k in self.keys() )
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
    """Enum-style base that raises a descriptive error for unknown values."""
    @classmethod
    def __lowerCAmelCase ( cls : str ,lowerCamelCase__ : Optional[int] ):
        # The original read an undefined name `value` and a non-existent
        # `_valueamember_map_`; Enum exposes `_value2member_map_`.
        raise ValueError(
            f'''{lowerCamelCase__} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class snake_case ( __UpperCAmelCase ):
    """Enum-like container of padding strategies: "longest", "max_length", "do_not_pad"."""
    # NOTE(review): all three members are bound to the same name `snake_case__`,
    # so only the last assignment survives — the member names look machine-mangled;
    # verify against the original enum definition.
    snake_case__ = "longest"
    snake_case__ = "max_length"
    snake_case__ = "do_not_pad"
class snake_case ( __UpperCAmelCase ):
    """Enum-like container of tensor framework tags: "pt", "tf", "np", "jax"."""
    # NOTE(review): all four members are bound to the same name `snake_case__`,
    # so only the last assignment survives — the member names look machine-mangled;
    # verify against the original enum definition.
    snake_case__ = "pt"
    snake_case__ = "tf"
    snake_case__ = "np"
    snake_case__ = "jax"
class snake_case :
    """Enter a list of context managers on ``__enter__`` and unwind them in
    reverse (LIFO) order on ``__exit__``, backed by a ``contextlib.ExitStack``.

    The original ``__init__`` discarded both attributes (assignments to a
    throwaway name) and ``__exit__`` had a duplicate-argument SyntaxError.
    """
    def __init__( self : int ,lowerCamelCase__ : List[ContextManager] ):
        self.context_managers = lowerCamelCase__
        self.stack = ExitStack()
    def __enter__( self : Union[str, Any] ):
        # Register every manager with the stack so cleanup is guaranteed.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self : List[Any] ,*args ,**kwargs ):
        self.stack.__exit__(*args ,**kwargs )
def a_ ( lowerCamelCase ):
    """Return True if the model class defaults ``return_loss`` to True.

    Inspects ``call`` for TensorFlow, ``forward`` for PyTorch and
    ``__call__`` for Flax models. The original body read names
    (``model_class``, ``framework``, ``signature``) that were never bound.
    """
    model_class = lowerCamelCase
    # `infer_framework` is defined elsewhere in this module.
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def a_ ( lowerCamelCase ):
    """Return the label-like parameter names of a model class's signature.

    QuestionAnswering heads additionally accept ``start_positions`` /
    ``end_positions``. The original body read names (``model_class``,
    ``model_name``, ``framework``, ``signature``) that were never bound.
    """
    model_class = lowerCamelCase
    model_name = model_class.__name__
    # `infer_framework` is defined elsewhere in this module.
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a_ ( d , parent_key="" , delimiter="." ):
    """Flatten a nested mapping into a single-level dict.

    Keys of nested mappings are joined with ``delimiter`` (e.g.
    ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``). The original signature reused one
    parameter name three times (SyntaxError) and the inner generator recursed
    through an undefined name.
    """
    from collections.abc import MutableMapping  # local import: keeps this block self-contained

    def _flatten_dict( d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v ,MutableMapping ):
                # Non-empty nested mapping: recurse with the joined key prefix.
                yield from _flatten_dict(v ,key ,delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(d ,parent_key ,delimiter ) )
@contextmanager
def a_ ( working_dir , use_temp_dir=False ):
    """Yield ``working_dir``, or a fresh temporary directory when
    ``use_temp_dir`` is True (removed on exit).

    The original signature reused one parameter name twice (SyntaxError).
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def a_ ( array , axes=None ):
    """Framework-agnostic transpose for numpy / torch / tf / jax arrays.

    The original signature reused one parameter name twice (SyntaxError)
    while the body read ``array``/``axes``.
    """
    if is_numpy_array(array ):
        return np.transpose(array ,axes=axes )
    elif is_torch_tensor(array ):
        # torch has no `axes=` keyword: .T for the default, permute otherwise.
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array ,perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array ,axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )
def a_ ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / tf / jax arrays.

    The original signature reused one parameter name twice (SyntaxError)
    while the body read ``array``.
    """
    if is_numpy_array(array ):
        return np.reshape(array ,newshape )
    elif is_torch_tensor(array ):
        # torch's reshape takes the dimensions unpacked.
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array ,newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array ,newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )
def a_ ( array , axis=None ):
    """Framework-agnostic squeeze for numpy / torch / tf / jax arrays.

    The original signature reused one parameter name twice (SyntaxError)
    while the body read ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.squeeze(array ,axis=axis )
    elif is_torch_tensor(array ):
        # torch names the argument `dim` and rejects dim=None.
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array ,axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array ,axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )
def a_ ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / tf / jax arrays.

    The original signature reused one parameter name twice (SyntaxError)
    while the body read ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array ,axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array ,axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array ,axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )
def a_ ( lowerCamelCase ):
    """Return the total element count of a numpy / torch / tf / jax tensor.

    The original body read an unbound ``array`` and its error message said
    "expand_dims" (copy-paste from the function above).
    """
    array = lowerCamelCase
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(array )}.''' )
def a_ ( auto_map , repo_id ):
    """Prefix every auto_map entry with ``repo_id`` (in place) and return it.

    Entries already containing ``--`` (or ``None``) are left untouched; list
    values are rewritten element-wise. The original signature reused one
    parameter name twice (SyntaxError) and discarded the rewritten values.
    """
    for key, value in auto_map.items():
        if isinstance(value ,(tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def a_ ( lowerCamelCase ):
    """Infer "tf", "pt" or "flax" from a model class's MRO.

    Walks ``inspect.getmro`` and matches either the defining module prefix
    (tensorflow/keras, torch, flax/jax) or the well-known base-class name.
    Raises TypeError when no base matches. The original body read locals
    (``model_class``, ``module``, ``name``) that were never bound.
    """
    model_class = lowerCamelCase
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 632 | 1 |
"""simple docstring"""
def a_ ( first , second ):
    """Return ``first + second`` for non-negative ints using only bitwise ops.

    XOR adds without carry; AND finds the carry bits, which are shifted left
    and re-added until none remain. The original signature reused one
    parameter name twice (SyntaxError) and read an undefined ``c``.
    """
    while second != 0:
        # Bits that carry into the next position.
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original read undefined names (`first`, `second`, `add`): the two
    # inputs were bound to a throwaway name and the adder is defined as `a_`.
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(F"""{a_(first, second) = }""")
| 632 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ : Union[str, Any] = False
class snake_case ( unittest.TestCase ):
    """Intentionally empty TestCase: the real checks live in the nightly GPU suite below."""
    pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Nightly GPU integration tests for `VersatileDiffusionPipeline`.

    NOTE(review): throughout these tests locals are assigned to
    ``UpperCAmelCase__`` while later statements read names such as ``pipe``,
    ``image`` and ``generator`` — the identifiers look machine-mangled and the
    methods cannot run as written; verify against the original test file.
    """
    def __lowerCAmelCase ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCAmelCase ( self : Any ):
        # Round-trip check: outputs must match after save_pretrained/from_pretrained.
        UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase__ )
            UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ ,torch_dtype=torch.floataa )
            pipe.to(lowerCamelCase__ )
            pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        # Re-seed and re-run with the reloaded pipeline.
        UpperCAmelCase__ = generator.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def __lowerCAmelCase ( self : Optional[int] ):
        # End-to-end checks for dual_guided, text_to_image and image_variation
        # against stored reference pixel slices.
        UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = 'cyberpunk 2077'
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.dual_guided(
            prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        UpperCAmelCase__ = 'A painting of a squirrel eating a burger '
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe.text_to_image(
            prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        UpperCAmelCase__ = pipe.image_variation(lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='numpy' ).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 632 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class snake_case ( __UpperCAmelCase ):
    """Dataset reader that loads text files through the `Text` packaged builder.

    The original ``__init__`` repeated the name ``lowerCamelCase__`` for all
    eight parameters, which is a SyntaxError; the parameter names below are
    reconstructed from the keywords forwarded to ``super().__init__``.
    """
    def __init__( self : Union[str, Any] ,path_or_paths : NestedDataStructureLike[PathLike] ,split : Optional[NamedSplit] = None ,features : Optional[Features] = None ,cache_dir : str = None ,keep_in_memory : bool = False ,streaming : bool = False ,num_proc : Optional[int] = None ,**kwargs ,):
        super().__init__(
            path_or_paths ,split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,num_proc=num_proc ,**kwargs ,)
        # Normalize the input into a {split: paths} mapping for the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths ,dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir ,data_files=path_or_paths ,features=features ,**kwargs ,)
    def __lowerCAmelCase ( self : Optional[Any] ):
        """Build and return the dataset (streaming or fully prepared)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config ,download_mode=download_mode ,verification_mode=verification_mode ,base_path=base_path ,num_proc=self.num_proc ,)
            dataset = self.builder.as_dataset(
                split=self.split ,verification_mode=verification_mode ,in_memory=self.keep_in_memory )
        return dataset
| 632 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
    """GPT-2 based caption decoder with a learned prefix projection.

    NOTE(review): throughout this class many statements assign to the
    throwaway name ``UpperCAmelCase__`` while later lines read names such as
    ``prefix_embeds``, ``hidden`` or ``generated`` — identifiers appear
    machine-mangled and the methods cannot run as written; verify every
    assignment target against the original source before relying on this code.
    """
    # Attention bias buffers excluded from weight loading (regex patterns).
    snake_case__ = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : int = 50_257 ,lowerCamelCase__ : int = 1_024 ,lowerCamelCase__ : int = 768 ,lowerCamelCase__ : int = 12 ,lowerCamelCase__ : int = 12 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : str = "gelu_new" ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 0.1 ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : float = 0.0_2 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,):
        # NOTE(review): duplicate parameter names make this signature invalid
        # Python; the defaults suggest standard GPT-2 hyper-parameters.
        super().__init__()
        UpperCAmelCase__ = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                f''' `n_embd`: {n_embd} are not equal.''' )
        UpperCAmelCase__ = prefix_inner_dim
        UpperCAmelCase__ = prefix_hidden_dim
        # Optional linear projections between prefix space and GPT-2 embedding space.
        UpperCAmelCase__ = (
            nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        UpperCAmelCase__ = (
            nn.Linear(self.prefix_hidden_dim ,lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        UpperCAmelCase__ = GPTaConfig(
            vocab_size=lowerCamelCase__ ,n_positions=lowerCamelCase__ ,n_embd=lowerCamelCase__ ,n_layer=lowerCamelCase__ ,n_head=lowerCamelCase__ ,n_inner=lowerCamelCase__ ,activation_function=lowerCamelCase__ ,resid_pdrop=lowerCamelCase__ ,embd_pdrop=lowerCamelCase__ ,attn_pdrop=lowerCamelCase__ ,layer_norm_epsilon=lowerCamelCase__ ,initializer_range=lowerCamelCase__ ,scale_attn_weights=lowerCamelCase__ ,use_cache=lowerCamelCase__ ,scale_attn_by_inverse_layer_idx=lowerCamelCase__ ,reorder_and_upcast_attn=lowerCamelCase__ ,)
        UpperCAmelCase__ = GPTaLMHeadModel(lowerCamelCase__ )
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : torch.Tensor ,lowerCamelCase__ : torch.Tensor ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[torch.Tensor] = None ,):
        """Forward pass: embed tokens, project and prepend the prefix, run GPT-2."""
        UpperCAmelCase__ = self.transformer.transformer.wte(lowerCamelCase__ )
        UpperCAmelCase__ = self.encode_prefix(lowerCamelCase__ )
        UpperCAmelCase__ = self.decode_prefix(lowerCamelCase__ )
        UpperCAmelCase__ = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
        if labels is not None:
            # Dummy zero tokens cover the prefix positions in the label tensor.
            UpperCAmelCase__ = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
            UpperCAmelCase__ = torch.cat((dummy_token, input_ids) ,dim=1 )
        UpperCAmelCase__ = self.transformer(inputs_embeds=lowerCamelCase__ ,labels=lowerCamelCase__ ,attention_mask=lowerCamelCase__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : torch.device ):
        # Zero "dummy" tokens standing in for the prefix positions.
        return torch.zeros(lowerCamelCase__ ,self.prefix_length ,dtype=torch.intaa ,device=lowerCamelCase__ )
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[str] ):
        # Project features into the prefix hidden space.
        return self.encode_prefix(lowerCamelCase__ )
    @torch.no_grad()
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ):
        """Generate one caption per feature vector using beam search."""
        UpperCAmelCase__ = torch.split(lowerCamelCase__ ,1 ,dim=0 )
        UpperCAmelCase__ = []
        UpperCAmelCase__ = []
        for feature in features:
            UpperCAmelCase__ = self.decode_prefix(feature.to(lowerCamelCase__ ) )  # back to the clip feature
            # Only support beam search for now
            UpperCAmelCase__ , UpperCAmelCase__ = self.generate_beam(
                input_embeds=lowerCamelCase__ ,device=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        UpperCAmelCase__ = torch.stack(lowerCamelCase__ )
        UpperCAmelCase__ = torch.stack(lowerCamelCase__ )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str=None ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 67 ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : Optional[int] = None ,):
        """Beam search over GPT-2 logits; returns (token beams, per-beam lengths)."""
        UpperCAmelCase__ = eos_token_id
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        UpperCAmelCase__ = torch.ones(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.int )
        UpperCAmelCase__ = torch.zeros(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.bool )
        if input_embeds is not None:
            UpperCAmelCase__ = input_embeds
        else:
            UpperCAmelCase__ = self.transformer.transformer.wte(lowerCamelCase__ )
        for i in range(lowerCamelCase__ ):
            UpperCAmelCase__ = self.transformer(inputs_embeds=lowerCamelCase__ )
            UpperCAmelCase__ = outputs.logits
            UpperCAmelCase__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            UpperCAmelCase__ = logits.softmax(-1 ).log()
            if scores is None:
                # First step: take the top-k tokens to initialise the beams.
                UpperCAmelCase__ , UpperCAmelCase__ = logits.topk(lowerCamelCase__ ,-1 )
                UpperCAmelCase__ = generated.expand(lowerCamelCase__ ,*generated.shape[1:] )
                UpperCAmelCase__ , UpperCAmelCase__ = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
                if tokens is None:
                    UpperCAmelCase__ = next_tokens
                else:
                    UpperCAmelCase__ = tokens.expand(lowerCamelCase__ ,*tokens.shape[1:] )
                    UpperCAmelCase__ = torch.cat((tokens, next_tokens) ,dim=1 )
            else:
                # Subsequent steps: extend each beam and re-select the best k.
                UpperCAmelCase__ = -float(np.inf )
                UpperCAmelCase__ = 0
                UpperCAmelCase__ = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                UpperCAmelCase__ = scores_sum / seq_lengths[:, None]
                UpperCAmelCase__ , UpperCAmelCase__ = scores_sum_average.view(-1 ).topk(lowerCamelCase__ ,-1 )
                UpperCAmelCase__ = next_tokens // scores_sum.shape[1]
                UpperCAmelCase__ = seq_lengths[next_tokens_source]
                UpperCAmelCase__ = next_tokens % scores_sum.shape[1]
                UpperCAmelCase__ = next_tokens.unsqueeze(1 )
                UpperCAmelCase__ = tokens[next_tokens_source]
                UpperCAmelCase__ = torch.cat((tokens, next_tokens) ,dim=1 )
                UpperCAmelCase__ = generated[next_tokens_source]
                UpperCAmelCase__ = scores_sum_average * seq_lengths
                UpperCAmelCase__ = is_stopped[next_tokens_source]
            UpperCAmelCase__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
            UpperCAmelCase__ = torch.cat((generated, next_token_embed) ,dim=1 )
            UpperCAmelCase__ = is_stopped + next_tokens.eq(lowerCamelCase__ ).squeeze()
            if is_stopped.all():
                break
        UpperCAmelCase__ = scores / seq_lengths
        UpperCAmelCase__ = scores.argsort(descending=lowerCamelCase__ )
        # tokens tensors are already padded to max_seq_length
        UpperCAmelCase__ = [tokens[i] for i in order]
        UpperCAmelCase__ = torch.stack(lowerCamelCase__ ,dim=0 )
        UpperCAmelCase__ = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 632 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase = 6_0_0_8_5_1_4_7_5_1_4_3 ):
    """Return the largest prime factor of the given number (Project Euler 3).

    Raises TypeError when the input cannot be cast to int and ValueError for
    non-positive inputs. The original body read an unbound ``n`` and
    discarded the running answer.
    """
    try:
        n = int(lowerCamelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    ans = 1
    i = 2
    # Trial division: divide out each factor fully, so every i that divides
    # n is prime; the last recorded factor is the largest.
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        # Remaining n is itself a prime larger than sqrt(original n).
        ans = n
    return int(ans )
if __name__ == "__main__":
    # The solver above is bound to `a_` in this module (the original printed
    # an undefined `solution`).
    print(F"""{a_() = }""")
| 632 | """simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCAmelCase__ : str = 'base_with_context'
def a_ ( lowerCamelCase , lowerCamelCase ):
    """Copy T5X token-encoder weights into the torch notes-encoder model.

    NOTE(review): the parameter list is invalid Python (duplicate names) and
    every left-hand side has been collapsed to ``UpperCAmelCase__`` where the
    original assigned into model sub-module parameters; the body reads
    ``weights`` and ``model`` that are never bound here. Verify against the
    original conversion script before use.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = ly_weight['attention']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def a_ ( lowerCamelCase , lowerCamelCase ):
    """Copy T5X continuous-encoder weights into the torch context encoder.

    NOTE(review): invalid parameter list (duplicate names) and assignment
    targets collapsed to ``UpperCAmelCase__``; the body reads unbound
    ``weights``/``model``. Verify against the original conversion script.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
        UpperCAmelCase__ = ly_weight['attention']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def a_ ( lowerCamelCase , lowerCamelCase ):
    """Copy T5X decoder weights (self/cross attention + FiLM) into the torch decoder.

    NOTE(review): invalid parameter list (duplicate names) and assignment
    targets collapsed to ``UpperCAmelCase__``; the body reads unbound
    ``weights``/``model``. Verify against the original conversion script.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        UpperCAmelCase__ = ly_weight['self_attention']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = ly_weight['MultiHeadDotProductAttention_0']
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
    return model
def a_ ( lowerCamelCase ):
    """Convert a T5X music-spectrogram-diffusion checkpoint into a diffusers
    `SpectrogramDiffusionPipeline` and optionally save it.

    NOTE(review): locals are collapsed to ``UpperCAmelCase__`` while later
    lines read names such as ``ta_checkpoint`` and ``synth_model``; the body
    also reads the module-global ``args`` and calls ``load_*`` helpers that
    are not bound under those names in this file. Verify against the original
    conversion script before use.
    """
    UpperCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    UpperCAmelCase__ = jnp.tree_util.tree_map(onp.array , lowerCamelCase )
    UpperCAmelCase__ = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    UpperCAmelCase__ = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    UpperCAmelCase__ = inference.parse_training_gin_file(lowerCamelCase , lowerCamelCase )
    UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path , lowerCamelCase )
    UpperCAmelCase__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    UpperCAmelCase__ = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    UpperCAmelCase__ = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    UpperCAmelCase__ = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    UpperCAmelCase__ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowerCamelCase )
    UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowerCamelCase )
    UpperCAmelCase__ = load_decoder(ta_checkpoint['target']['decoder'] , lowerCamelCase )
    UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    UpperCAmelCase__ = SpectrogramDiffusionPipeline(
        notes_encoder=lowerCamelCase , continuous_encoder=lowerCamelCase , decoder=lowerCamelCase , scheduler=lowerCamelCase , melgan=lowerCamelCase , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # The original bound the parser and parsed args to throwaway names and
    # called an undefined `main`; the conversion entry point is `a_` above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=F"""{MODEL}/checkpoint_500000""",
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()
    a_(args)
| 632 | 1 |
"""simple docstring"""
from __future__ import annotations
def a_ ( voltage , current , resistance ):
    """Solve Ohm's law for whichever of voltage/current/resistance is 0.

    Exactly one argument must be 0 (the unknown); returns a one-entry dict
    naming the solved quantity. The original signature reused one parameter
    name three times (SyntaxError).
    """
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance < 0:
        raise ValueError('Resistance cannot be negative' )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 632 | """simple docstring"""
import socket
def a_ ( ):
    """Connect to a file server on this host (port 12312) and stream the
    received bytes into a local file named ``Received_file``.

    The original body bound the socket, host, port and each received chunk to
    a throwaway name and then read `sock`/`data`, which were never defined.
    """
    sock = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(b'Hello server!' )
    with open('Received_file' ,'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                # Empty recv means the server closed the connection.
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
    # The client entry point in this module is `a_` (the original called an
    # undefined `main`).
    a_()
| 632 | 1 |
"""simple docstring"""
def a_ ( collection ):
    """Counting sort: return a new ascending list of the given integers.

    Stable, O(n + k) with k = value range; handles negatives via an offset.
    The original body read unbound names and dropped the index on the output
    assignment.
    """
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors: counting_arr[i] then tells us
    # how many elements <= i + coll_min the collection contains
    for i in range(1 ,counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 ,coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def a_ ( lowerCamelCase ):
    """Return the characters of the given string sorted via counting sort.

    Bug fixes: the comprehensions previously applied ``chr``/``ord`` to the
    whole argument instead of the loop variables, and read an undefined name
    ``string`` — each raised at runtime. The loop variables are now used.
    """
    # Sort the code points, then rebuild a string from them.
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in lowerCamelCase] )] )
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): ``counting_sort_string``/``counting_sort``/``user_input``/
    # ``unsorted`` are not bound under these names above (the definitions were
    # renamed to ``a_`` and the assignments to ``lowerCAmelCase__``), so this
    # guard raises NameError as written — confirm the intended names.
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    lowerCAmelCase__ : List[Any] = input('Enter numbers separated by a comma:\n').strip()
    lowerCAmelCase__ : List[Any] = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
| 632 | """simple docstring"""
from __future__ import annotations
class snake_case :
    """simple docstring"""
    # Integer/float matrix with determinant, inverse, row/column insertion and
    # the usual arithmetic operators.
    # NOTE(review): variable/parameter names in this class were machine-mangled:
    # results are bound to ``UpperCAmelCase__`` while following statements read
    # the intended names (``rows``, ``cols``, ``other``, ``position`` ...), and
    # several methods declare the same parameter name twice (a SyntaxError).
    # The comments below document the intended behavior; the bindings need
    # restoring before this class can run.
    # Validate and store the rows: every row must have the same non-zero length
    # and contain only ints/floats; an empty list of rows is allowed.
    def __init__( self : Dict ,lowerCamelCase__ : list[list[int]] ):
        UpperCAmelCase__ = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(lowerCamelCase__ ) != 0:
            UpperCAmelCase__ = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(lowerCamelCase__ ) != cols:
                    raise error
                for value in row:
                    if not isinstance(lowerCamelCase__ ,(int, float) ):
                        raise error
            UpperCAmelCase__ = rows
        else:
            UpperCAmelCase__ = []
    # Columns of the matrix (the transpose of ``self.rows``).
    def __lowerCAmelCase ( self : Union[str, Any] ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    # Number of rows.
    @property
    def __lowerCAmelCase ( self : str ):
        return len(self.rows )
    # Number of columns (assumes at least one row).
    @property
    def __lowerCAmelCase ( self : List[Any] ):
        return len(self.rows[0] )
    # (rows, columns) pair.
    @property
    def __lowerCAmelCase ( self : Any ):
        return (self.num_rows, self.num_columns)
    # True when the matrix is square.
    @property
    def __lowerCAmelCase ( self : Optional[int] ):
        return self.order[0] == self.order[1]
    # Identity matrix of the same size.
    def __lowerCAmelCase ( self : Optional[int] ):
        UpperCAmelCase__ = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(lowerCamelCase__ )
    # Determinant via cofactor (Laplace) expansion along the first row;
    # non-square matrices return 0, the empty matrix returns 1.
    def __lowerCAmelCase ( self : str ):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    # Invertible iff the determinant is non-zero.
    def __lowerCAmelCase ( self : List[str] ):
        return bool(self.determinant() )
    # Minor: determinant of the matrix with ``row`` and ``column`` removed.
    def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
        UpperCAmelCase__ = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(lowerCamelCase__ ).determinant()
    # Cofactor: the minor with the (-1)^(row+column) sign applied.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
        if (row + column) % 2 == 0:
            return self.get_minor(lowerCamelCase__ ,lowerCamelCase__ )
        return -1 * self.get_minor(lowerCamelCase__ ,lowerCamelCase__ )
    # Matrix of minors.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        return Matrix(
            [
                [self.get_minor(lowerCamelCase__ ,lowerCamelCase__ ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    # Matrix of cofactors (minors with alternating signs).
    def __lowerCAmelCase ( self : int ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    # Adjugate: transpose of the cofactor matrix.
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(lowerCamelCase__ )
    # Inverse = adjugate * (1 / determinant); raises for singular matrices.
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
    def __repr__( self : Optional[Any] ):
        return str(self.rows )
    # Pretty-print the rows, one per line.
    def __str__( self : List[str] ):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(lowerCamelCase__ ) for value in row] ) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )
    # Append (or insert at ``position``) a validated row of matching length.
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[int] ,lowerCamelCase__ : int | None = None ):
        UpperCAmelCase__ = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            raise type_error
        for value in row:
            if not isinstance(lowerCamelCase__ ,(int, float) ):
                raise type_error
        if len(lowerCamelCase__ ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(lowerCamelCase__ )
        else:
            UpperCAmelCase__ = self.rows[0:position] + [row] + self.rows[position:]
    # Append (or insert at ``position``) a validated column of matching length.
    def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : list[int] ,lowerCamelCase__ : int | None = None ):
        UpperCAmelCase__ = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            raise type_error
        for value in column:
            if not isinstance(lowerCamelCase__ ,(int, float) ):
                raise type_error
        if len(lowerCamelCase__ ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            UpperCAmelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            UpperCAmelCase__ = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    # Equality compares the row data; non-Matrix operands -> NotImplemented.
    def __eq__( self : List[Any] ,lowerCamelCase__ : object ):
        if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self : Any ,lowerCamelCase__ : object ):
        return not self == other
    # Unary minus: scalar-multiply by -1.
    def __neg__( self : Dict ):
        return self * -1
    # Element-wise addition; orders must match.
    def __add__( self : str ,lowerCamelCase__ : Matrix ):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    # Element-wise subtraction; orders must match.
    def __sub__( self : List[str] ,lowerCamelCase__ : Matrix ):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    # Scalar multiplication (note: truncates each element to int) or matrix
    # product built from row-by-column dot products.
    def __mul__( self : List[str] ,lowerCamelCase__ : Matrix | int | float ):
        if isinstance(lowerCamelCase__ ,(int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(lowerCamelCase__ ,lowerCamelCase__ ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    # Integer powers; 0 -> identity, negative exponents go through the inverse.
    def __pow__( self : Optional[int] ,lowerCamelCase__ : int ):
        if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        UpperCAmelCase__ = self
        for _ in range(other - 1 ):
            result *= self
        return result
    # Dot product of a row vector and a column vector of equal length.
    @classmethod
    def __lowerCAmelCase ( cls : Union[str, Any] ,lowerCamelCase__ : list[int] ,lowerCamelCase__ : list[int] ):
        return sum(row[i] * column[i] for i in range(len(lowerCamelCase__ ) ) )
if __name__ == "__main__":
    # Self-check: run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 632 | 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class snake_case :
    """simple docstring"""
    # Test harness that builds small MobileBERT configs and synthetic inputs
    # and asserts output shapes for every task head.
    # NOTE(review): names in this class were machine-mangled — many ``def``s
    # repeat the same parameter name (a SyntaxError) and results are bound to
    # ``UpperCAmelCase__`` while later lines read the intended names
    # (``parent``, ``config``, ``model``, ``result`` ...); restore the
    # bindings before running.
    def __init__( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : str=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : List[Any]=99 ,lowerCamelCase__ : Union[str, Any]=64 ,lowerCamelCase__ : Any=32 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : List[Any]=4 ,lowerCamelCase__ : Optional[int]=37 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Tuple=512 ,lowerCamelCase__ : Optional[Any]=16 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : Any=0.0_2 ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : int=None ,):
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = batch_size
        UpperCAmelCase__ = seq_length
        UpperCAmelCase__ = is_training
        UpperCAmelCase__ = use_input_mask
        UpperCAmelCase__ = use_token_type_ids
        UpperCAmelCase__ = use_labels
        UpperCAmelCase__ = vocab_size
        UpperCAmelCase__ = hidden_size
        UpperCAmelCase__ = embedding_size
        UpperCAmelCase__ = num_hidden_layers
        UpperCAmelCase__ = num_attention_heads
        UpperCAmelCase__ = intermediate_size
        UpperCAmelCase__ = hidden_act
        UpperCAmelCase__ = hidden_dropout_prob
        UpperCAmelCase__ = attention_probs_dropout_prob
        UpperCAmelCase__ = max_position_embeddings
        UpperCAmelCase__ = type_vocab_size
        UpperCAmelCase__ = type_sequence_label_size
        UpperCAmelCase__ = initializer_range
        UpperCAmelCase__ = num_labels
        UpperCAmelCase__ = num_choices
        UpperCAmelCase__ = scope
    # Build random ids/masks/labels shaped by the hyper-parameters above.
    def __lowerCAmelCase ( self : Optional[int] ):
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ = None
        if self.use_input_mask:
            UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase__ = None
        if self.use_token_type_ids:
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        if self.use_labels:
            UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
        UpperCAmelCase__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Assemble a MobileBertConfig from the tester's hyper-parameters.
    def __lowerCAmelCase ( self : Tuple ):
        return MobileBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
    # Base model: checks last_hidden_state and pooler_output shapes.
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ):
        UpperCAmelCase__ = MobileBertModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
        UpperCAmelCase__ = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
        UpperCAmelCase__ = model(lowerCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    # Masked-LM head: logits over the vocabulary at every position.
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Tuple ):
        UpperCAmelCase__ = MobileBertForMaskedLM(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    # Next-sentence-prediction head: binary logits per example.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ):
        UpperCAmelCase__ = MobileBertForNextSentencePrediction(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
    # Pretraining head: MLM logits plus the seq-relationship pair logits.
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ):
        UpperCAmelCase__ = MobileBertForPreTraining(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,next_sentence_label=lowerCamelCase__ ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
    # QA head: start/end logits over the sequence.
    def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ):
        UpperCAmelCase__ = MobileBertForQuestionAnswering(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,start_positions=lowerCamelCase__ ,end_positions=lowerCamelCase__ ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    # Sequence-classification head: one logit vector per example.
    def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ):
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = MobileBertForSequenceClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    # Token-classification head: per-token logits.
    def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ):
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = MobileBertForTokenClassification(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    # Multiple-choice head: inputs are tiled across choices first.
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str ):
        UpperCAmelCase__ = self.num_choices
        UpperCAmelCase__ = MobileBertForMultipleChoice(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase__ = model(
            lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    # Repackage prepare_config_and_inputs() for the common-test mixin.
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) = config_and_inputs
        UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""
    # All MobileBERT head variants exercised by the common model tests
    # (empty tuple when torch is unavailable).
    snake_case__ = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task name -> model class, consumed by the pipeline test mixin.
    snake_case__ = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ = True
    # Adds dummy label tensors when the common tests request labelled inputs.
    # NOTE(review): mangled names — duplicate parameter names and unbound reads
    # of ``return_labels``/``model_class``/``inputs_dict``; restore before use.
    def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[int]=False ):
        UpperCAmelCase__ = super()._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
        if return_labels:
            if model_class in get_values(lowerCamelCase__ ):
                UpperCAmelCase__ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase__ )
                UpperCAmelCase__ = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase__ )
        return inputs_dict
    # Shared fixtures (model tester + config tester) for every test below.
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = MobileBertModelTester(self )
        UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 )
    # Config sanity checks.
    def __lowerCAmelCase ( self : int ):
        self.config_tester.run_common_tests()
    # One test per head: build inputs, then delegate to the tester's checker.
    def __lowerCAmelCase ( self : Optional[Any] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Dict ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Any ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[Any] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Dict ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : List[str] ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase__ )
    def __lowerCAmelCase ( self : Dict ):
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase__ )
def a_ ( lowerCamelCase ):
    """Wrap a nested list of token ids in a ``torch.long`` tensor on the test device.

    Bug fix: the original passed the input list itself as ``device=``, which
    fails at runtime; the tensor belongs on the module-level ``torch_device``.
    """
    return torch.tensor(
        lowerCamelCase , dtype=torch.long , device=torch_device , )
lowerCAmelCase__ : Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
    """simple docstring"""
    # Integration test: run the pretrained checkpoint on a fixed sentence and
    # compare a 3x3 logit slice against reference values within TOLERANCE.
    # NOTE(review): result names were mangled — ``model``/``output``/
    # ``expected_slice``/``lower_bound``/``upper_bound`` are read but bound as
    # ``UpperCAmelCase__``, and ``lowerCamelCase__`` is read with no binding
    # (presumably the module-level ``torch_device``); restore before running.
    @slow
    def __lowerCAmelCase ( self : int ):
        UpperCAmelCase__ = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase__ )
        UpperCAmelCase__ = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            UpperCAmelCase__ = model(lowerCamelCase__ )[0]
        UpperCAmelCase__ = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape ,lowerCamelCase__ )
        UpperCAmelCase__ = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ] ,device=lowerCamelCase__ ,)
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        UpperCAmelCase__ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        UpperCAmelCase__ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 632 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ : int = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Union[str, Any] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[Any] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 632 | 1 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
lowerCAmelCase__ : Optional[int] = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def a_ ( ):
    """Triage stale issues in huggingface/transformers.

    Issues quiet for long enough are closed when the bot already warned and
    nobody replied, or nagged with a stale-notice comment otherwise. Issues
    carrying a label in ``LABELS_TO_EXEMPT`` are never touched.

    Bug fixes: the sort-key lambda read an undefined name ``i``, and the
    ``comments``/``reverse=``/other bindings were mangled to undefined names.
    """
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments() , key=lambda comment: comment.created_at , reverse=True )
        last_comment = comments[0] if comments else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # The bot already warned and there was a week of silence: close.
            issue.edit(state='closed' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 2_3
            and (dt.utcnow() - issue.created_at).days >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Quiet for over three weeks: post the stale notice.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )


if __name__ == "__main__":
    # The guard previously called an undefined ``main``; the entry point is a_.
    a_()
| 632 | """simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase__ : Optional[int] = False
class snake_case ( unittest.TestCase ):
    """simple docstring"""
    # Intentionally empty placeholder for fast (non-GPU) pipeline tests; the
    # real checks live in the slow, GPU-gated class defined next in this file.
    pass
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """simple docstring"""
    # Integration test: run the image-variation pipeline with a fixed seed and
    # compare a 3x3 slice of the last channel against reference values.
    # NOTE(review): result names were mangled — ``pipe``/``image``/
    # ``image_slice``/``expected_slice`` are read but bound as
    # ``UpperCAmelCase__``, and ``lowerCamelCase__`` is read with no binding
    # (device / progress-bar flag / pipeline kwargs); restore before running.
    def __lowerCAmelCase ( self : Tuple ):
        UpperCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe(
            image=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
        UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
        # Fixed generator seed above, so a tolerance-based slice comparison is valid.
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase__ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 632 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ : Any = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
lowerCAmelCase__ : Any = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
lowerCAmelCase__ : List[str] = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Spearman rank-order correlation metric, backed by scipy.stats.spearmanr."""

    # Metric metadata: float predictions/references; docs come from the
    # module-level description strings via the decorator above.
    def __lowerCAmelCase ( self : Any ):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': datasets.Value('float' ),
                    'references': datasets.Value('float' ),
                } ) ,reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] ,)

    # Compute the correlation; optionally include the p-value in the result.
    # Bug fixes: the three parameters all carried the same mangled name (a
    # SyntaxError), and the scipy result was bound to a throwaway name while
    # the code below read ``results``. Meaningful names restored.
    def __lowerCAmelCase ( self : List[Any] ,predictions : Tuple ,references : Optional[int] ,return_pvalue : int=False ):
        results = spearmanr(predictions ,references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 632 | """simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def a_ ( lowerCamelCase = 2_0_0_0_0_0_0 ):
    """Project Euler 85: area of the grid whose rectangle count is nearest target.

    An a-by-b grid contains T(a) * T(b) rectangles, where T(n) is the n-th
    triangle number. For each candidate width we estimate the matching height
    with the quadratic formula and test the two nearest integers.

    Bug fix: the original bound every value to a mangled throwaway name and
    then read the intended names (``triangle_numbers``, ``target``,
    ``best_product`` ...), raising NameError; the bindings are restored.

    :param lowerCamelCase: target rectangle count (default 2,000,000).
    :return: area ``a * b`` of the best grid.
    """
    # T(0)..T(k) with k comfortably past sqrt(2 * target).
    triangle_numbers: list[int] = [0]
    for idx in range(1 , ceil(sqrt(lowerCamelCase * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # Best rectangle count found so far, and the area that produced it.
    best_product = 0
    area = 0
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        # Solve T(b) * triangle_a == target for b (quadratic formula estimate),
        # then test the integers on either side of the estimate.
        b_estimate = (-1 + sqrt(1 + 8 * lowerCamelCase / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(lowerCamelCase - triangle_b_first_guess * triangle_a ) < abs(
            lowerCamelCase - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(lowerCamelCase - triangle_b_second_guess * triangle_a ) < abs(
            lowerCamelCase - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area


if __name__ == "__main__":
    # The guard referenced an undefined ``solution``; the entry point is a_.
    print(F"""{a_() = }""")
| 632 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
# Configuration.  The original bound every result to the throwaway name
# `lowerCAmelCase__` while the following lines read `parser`, `args`,
# `tokenizer`, `config_kwargs`, `config` and `model` — restore those names.
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case) and override with the kwargs above
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config (random weights, no pretrained checkpoint)
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 632 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# The conversion code below reads `logger` and `EXPECTED_MISSING_KEYS`; the
# original bound both to the throwaway name `lowerCAmelCase__`.
logger = logging.get_logger(__name__)
# Decoder positional-embedding weights are rebuilt on the HF side, so they are
# expected to be absent when loading the fairseq state dict.
EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys(name):
    """Map a fairseq MusicGen decoder weight name to its HF equivalent.

    Substring replacements are applied in sequence, so one key may be
    rewritten several times (e.g. ``transformer`` then ``linear1``).

    The original bound each replacement to a throwaway variable and never
    bound ``name`` from its parameter, so it raised NameError; the rest of
    this file calls the function as ``rename_keys``.
    """
    if "emb" in name:
        name = name.replace('emb', 'model.decoder.embed_tokens')
    if "transformer" in name:
        name = name.replace('transformer', 'model.decoder')
    if "cross_attention" in name:
        name = name.replace('cross_attention', 'encoder_attn')
    if "linear1" in name:
        name = name.replace('linear1', 'fc1')
    if "linear2" in name:
        name = name.replace('linear2', 'fc2')
    if "norm1" in name:
        name = name.replace('norm1', 'self_attn_layer_norm')
    if "norm_cross" in name:
        name = name.replace('norm_cross', 'encoder_attn_layer_norm')
    if "norm2" in name:
        name = name.replace('norm2', 'final_layer_norm')
    if "out_norm" in name:
        name = name.replace('out_norm', 'model.decoder.layer_norm')
    if "linears" in name:
        name = name.replace('linears', 'lm_heads')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj', 'enc_to_dec_proj')
    return name


# Backward-compat alias for the original (mangled) name of this function.
a_ = rename_keys
def rename_state_dict(state_dict, hidden_size):
    """Rename a fairseq MusicGen LM state dict to HF conventions.

    Each fused ``in_proj_weight`` is split into separate q/k/v projections of
    ``hidden_size`` rows each, and the encoder-decoder projection weights are
    partitioned into a second dict (loaded separately by the caller).

    NOTE(review): the mangled original discarded all assignment targets; the
    destination key names below are reconstructed from the upstream HF
    MusicGen conversion script — confirm against it.

    Returns:
        ``(state_dict, enc_dec_proj_state_dict)``.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj: rows [0:h] -> q, [h:2h] -> k, [2h:3h] -> v
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # strip the "enc_to_dec_proj." prefix; these weights are loaded
            # into model.enc_to_dec_proj by the caller
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


# Backward-compat alias for the original (mangled) name of this function.
a_ = rename_state_dict
def decoder_config_from_checkpoint(checkpoint):
    """Return a ``MusicgenDecoderConfig`` for the named fairseq checkpoint size.

    The rest of this file calls this helper as ``decoder_config_from_checkpoint``;
    the mangled original also lost the ``hidden_size`` / ``num_hidden_layers`` /
    ``num_attention_heads`` bindings read at the bottom, restored here.

    Raises:
        ValueError: if ``checkpoint`` is not one of ``small``/``medium``/``large``.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,  # feed-forward width is 4x the hidden size
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


# Backward-compat alias for the original (mangled) name of this function.
a_ = decoder_config_from_checkpoint
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint to a HF ``MusicgenForConditionalGeneration``.

    Loads the fairseq model, renames/splits its LM state dict, assembles the
    composite T5-encoder + Encodec + decoder model, sanity-checks a forward
    pass, builds the processor, then optionally saves locally and/or pushes
    to the hub.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and discarded every local binding; names and the
    generation-config targets below are reconstructed from the reference
    sites in this body and the upstream conversion script — confirm.

    Args:
        checkpoint: one of ``small``/``medium``/``large``.
        pytorch_dump_folder: if given, directory to save model + processor to.
        repo_id: if given, hub repo to push model + processor to.
        device: torch device for the fairseq model during conversion.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained('t5-base')
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz')
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder')) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''')
    if len(unexpected_keys) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''')

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_0_4_8):
        raise ValueError('Incorrect shape for logits')

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base')
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz', padding_side='left')
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    # NOTE(review): targets reconstructed — upstream sets these on
    # model.generation_config; confirm.
    model.generation_config.decoder_start_token_id = 2_0_4_8
    model.generation_config.pad_token_id = 2_0_4_8

    # set other default generation config params
    model.generation_config.max_length = int(3_0 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''')
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''')
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


# Backward-compat alias for the original (mangled) name of this function.
a_ = convert_musicgen_checkpoint
if __name__ == "__main__":
    # The mangled original bound the parser and parsed args to throwaway
    # names while the `add_argument` calls read `parser` — restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint',
        default='small',
        type=str,
        help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
    )
    parser.add_argument(
        '--pytorch_dump_folder',
        required=True,
        default=None,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    parser.add_argument(
        '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
    )

    args = parser.parse_args()
    # Pass the device through: the original parsed --device but never used it.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 632 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.