| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# The configuration is always importable; the framework-specific modeling files are
# registered below only when the corresponding backend is installed.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
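# How the lazy pattern above behaves (a note assuming the standard transformers
# _LazyModule semantics): at runtime the module object is replaced by a _LazyModule,
# so an import such as
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
# loads the heavy modeling file only on first attribute access, and only when the
# corresponding backend (here torch) is installed.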
| 270 |
from queue import PriorityQueue
from typing import Any

import numpy as np

def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    # Relax all outgoing edges of v in one search direction and check whether this
    # vertex connects the forward and backward search frontiers.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance

def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # The two frontiers have met: no shorter connecting path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance

graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
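# Worked example (derived from the graphs above, not part of the original file): the
# shortest E -> F path is E -> G -> F with total weight 2 + 1 = 3, so
# bidirectional_dij("E", "F", graph_fwd, graph_bwd) is expected to return 3.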
| 270 | 1 |
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging

logging.set_verbosity_info()

logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}

def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the serialized fast tokenizer file.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
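# Example invocation (paths and file name are illustrative; the flags are exactly the
# ones defined by the argument parser above):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers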
| 270 |
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")

def get_parent_position(position: int) -> int:
    return (position - 1) // 2

def get_child_left_position(position: int) -> int:
    return (2 * position) + 1

def get_child_right_position(position: int) -> int:
    return (2 * position) + 2

class MinPriorityQueue(Generic[T]):
    """Array-backed min-heap keyed by weight, with a position map for O(1) lookups."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos

class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
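# Usage sketch (illustrative, not part of the original file): build a small triangle
# graph and run prims_algo; the spanning tree keeps the two cheapest edges a-b and a-c.
# g = GraphUndirectedWeighted[str]()
# g.add_edge("a", "b", 3)
# g.add_edge("b", "c", 10)
# g.add_edge("c", "a", 5)
# dist, parent = prims_algo(g)
# Expected: parent == {"a": None, "b": "a", "c": "a"}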
| 270 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 |
from __future__ import annotations

from statistics import mean

def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed, a process whose arrival time has passed
    # and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time

def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time

if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
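# Expected output for the test case above (worked by hand from the algorithm): with
# all arrivals at 0, the run order is P1(2) -> P3(3) -> P2(5) -> P4(7), giving waiting
# times [0, 5, 2, 10], turnaround times [2, 10, 5, 17], average waiting time 4.25000
# and average turnaround time 8.50000.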
| 270 | 1 |
| 270 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
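# A minimal sketch of a concrete command (illustrative, not from the transformers
# codebase): register_subcommand receives the object returned by
# ArgumentParser.add_subparsers() and wires a factory, while run() does the work.
class HelloCommand(BaseTransformersCLICommand):
    def __init__(self, name: str):
        self.name = name

    @staticmethod
    def register_subcommand(parser):
        # `parser` is the sub-parsers action returned by add_subparsers()
        hello_parser = parser.add_parser("hello")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def run(self):
        print(f"hello, {self.name}")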
| 270 | 1 |
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)

def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts

@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50

@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}

@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict

@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 270 |
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace

@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 270 | 1 |
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader

class SparkDatasetReader(AbstractDatasetReader):
    """Reads a dataset from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
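# A minimal usage sketch (assumes a running Spark session; the cache path is
# illustrative and, when caching, should live on storage shared with the driver):
# spark = pyspark.sql.SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([{"id": 0}, {"id": 1}])
# ds = SparkDatasetReader(df, streaming=False, cache_dir="/mnt/shared/cache").read()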
| 270 |
from __future__ import annotations

from fractions import Fraction

def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )

def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions

def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)

if __name__ == "__main__":
    print(solution())
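# Worked expectation: the four non-trivial digit-cancelling fractions are 16/64,
# 19/95, 26/65 and 49/98; their product is 1/100, so solution() prints 100
# (the denominator of the product in lowest terms, Project Euler problem 33).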
| 270 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"

class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
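# A minimal usage sketch (assumes the sentencepiece package and access to the
# "camembert-base" checkpoint referenced in the vocab map above):
# tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
# ids = tokenizer.encode("J'aime le camembert !")
# print(tokenizer.convert_ids_to_tokens(ids))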
| 270 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="

def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name

if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 270 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None

def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()

def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans

def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))

def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()

def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))

def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1

def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores

def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores

def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )

def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]

def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()

def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}

def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")

def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh

def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh

def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))

if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 270 |
import pprint
import requests
SCREAMING_SNAKE_CASE__ : str = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
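# Single GET request; the ZenQuotes API returns a list containing today's quote.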
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __magic_name__ ( ) -> list:
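# Same endpoint family; returns a list containing one random quote.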
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = random_quotes()
pprint.pprint(response)
| 270 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=__lowercase ):
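# Import-time placeholder: any instantiation or classmethod call raises an error
# naming the missing backends (transformers, torch, note_seq).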
a__ : List[str] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __A ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : str ) -> Dict:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __A ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 270 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
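# Mobius function: 0 if n is divisible by a squared prime, otherwise (-1)^k
# where k is the number of prime factors of n.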
__lowerCamelCase = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Optional[int]:
__lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test a 2nd time - was getting 'eval_loss': nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
# as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
__lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__lowerCamelCase = experiments[experiment_id]
__lowerCamelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__lowerCamelCase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data['''extra_args_str'''] )
__lowerCamelCase = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {os.path.basename(SCREAMING_SNAKE_CASE__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : str ) -> Tuple[int, float]:
__lowerCamelCase = '''--skip_memory_metrics 0'''
__lowerCamelCase = self.run_trainer(
max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
__lowerCamelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
__lowerCamelCase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived like so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
    --max_source_length {max_len}
    --max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
    --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
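# Map each question id to whether it has at least one gold answer.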
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
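# Official SQuAD normalization: lower text and remove punctuation, articles and extra whitespace.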
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
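# Token-level F1 between a gold answer and a prediction, with a special case for empty answers.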
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
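# If a question's no-answer probability exceeds the threshold, score it as if the
# model predicted no answer; otherwise keep the raw score.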
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
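# Sweep predictions in order of increasing no-answer probability, accumulating
# precision/recall points; "ap" is the area under the resulting curve.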
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda k : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
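# Find the no-answer threshold that maximizes the given score, starting from the
# all-no-answer baseline and sweeping thresholds in order of increasing na_prob.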
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda k : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Dict ) -> Union[str, Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__lowerCamelCase = [[1, 2, 4], [1, 2, 3, 4]]
__lowerCamelCase = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
self.assertTrue(isinstance(dc.token_ids , SCREAMING_SNAKE_CASE__ ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self : Tuple ) -> Union[str, Any]:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__lowerCamelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ ) # fails here
def __A ( self : int ) -> Any:
__lowerCamelCase = [[1, 2, 3], [1, 2, 4]]
__lowerCamelCase = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(1 )
__lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(2 )
__lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(3 )
__lowerCamelCase = stepped is True and completed is True and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self : int ) -> Optional[int]:
__lowerCamelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__lowerCamelCase = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 270 |
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : int = 1000 ) -> int:
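# Return the index of the first Fibonacci number whose decimal expansion has n digits.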
__lowerCamelCase , __lowerCamelCase = 1, 1
__lowerCamelCase = 2
while True:
__lowerCamelCase = 0
__lowerCamelCase = fa + fa
__lowerCamelCase , __lowerCamelCase = fa, f
index += 1
for _ in str(__lowerCAmelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 270 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
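# Mask2Former configuration: `swin` is the only supported backbone family, and
# `hidden_size` is aliased to `hidden_dim` via the attribute map below.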
a__ : Any = """mask2former"""
a__ : Dict = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 20_48 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_55 , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_25_44 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> str:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = backbone_config.pop('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any ) -> Dict[str, any]:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 270 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 270 |
import os
def __magic_name__ ( ) -> str:
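# Sum the numbers listed one per line in num.txt and return the first ten digits of the total.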
__lowerCamelCase = os.path.join(os.path.dirname(__lowerCAmelCase ) , '''num.txt''' )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 270 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class lowerCAmelCase__ :
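# Wraps a torch LR scheduler so that it only steps when the wrapped optimizer(s)
# actually stepped, which keeps schedules consistent under gradient accumulation.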
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False ) -> Any:
__lowerCamelCase = scheduler
__lowerCamelCase = optimizers if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) else [optimizers]
__lowerCamelCase = split_batches
__lowerCamelCase = step_with_optimizer
__lowerCamelCase = GradientState()
def __A ( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowerCamelCase = AcceleratorState().num_processes
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
self.scheduler.step(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __A ( self : str ) -> str:
return self.scheduler.get_last_lr()
def __A ( self : Tuple ) -> int:
return self.scheduler.state_dict()
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
self.scheduler.load_state_dict(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Optional[Any]:
return self.scheduler.get_lr()
def __A ( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
return self.scheduler.print_lr(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 270 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str
a__ : List[str]
a__ : Optional[List[str]]
@dataclass
class lowerCAmelCase__ :
a__ : List[int]
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
class lowerCAmelCase__ ( __lowercase ):
a__ : Optional[Any] = """train"""
a__ : Optional[int] = """dev"""
a__ : Dict = """test"""
class lowerCAmelCase__ :
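# Task interface: subclasses implement reading examples and listing labels; the
# shared feature conversion below turns word/label pairs into model inputs.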
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : List[InputExample] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=-1_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[InputFeatures]:
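# Tokenize each word, keep the real label id only on the word's first sub-token,
# then truncate, add special tokens, and pad everything to max_seq_length.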
__lowerCamelCase = {label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}
__lowerCamelCase = []
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE__ ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = []
__lowerCamelCase = []
for word, label in zip(example.words , example.labels ):
__lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE__ ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__lowerCamelCase = tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE__ ) > max_seq_length - special_tokens_count:
__lowerCamelCase = tokens[: (max_seq_length - special_tokens_count)]
__lowerCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCamelCase = [sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCamelCase = [cls_token] + tokens
__lowerCamelCase = [pad_token_label_id] + label_ids
__lowerCamelCase = [cls_token_segment_id] + segment_ids
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCamelCase = [1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE__ )
# Zero-pad up to the sequence length.
__lowerCamelCase = max_seq_length - len(SCREAMING_SNAKE_CASE__ )
if pad_on_left:
__lowerCamelCase = ([pad_token] * padding_length) + input_ids
__lowerCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in label_ids] ) )
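# Tokenizers whose model inputs do not include token_type_ids (e.g. RoBERTa-style)
# should not receive segment ids, so drop them.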
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase__ ( __lowercase ):
a__ : List[InputFeatures]
a__ : int = nn.CrossEntropyLoss().ignore_index
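# nn.CrossEntropyLoss().ignore_index is -100, so label positions filled with this
# id are skipped when the loss is computed.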
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> Union[str, Any]:
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
__lowerCamelCase = torch.load(SCREAMING_SNAKE_CASE__ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
def __len__( self : Dict ) -> str:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase__ :
a__ : List[InputFeatures]
a__ : int = -100
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> List[Any]:
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
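# Yield (model_inputs, label_ids) pairs in the structure expected by
# tf.data.Dataset.from_generator below.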
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Any:
return len(self.features )
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
| 270 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def __magic_name__ ( __lowerCAmelCase : str ) -> int:
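# True iff every tensor in the list has the same shape as the first one.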
__lowerCamelCase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
a__ : str = StableDiffusionLatentUpscalePipeline
a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
a__ : Dict = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__ : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a__ : Any = frozenset([] )
a__ : str = True
@property
def __A ( self : Optional[int] ) -> int:
__lowerCamelCase = 1
__lowerCamelCase = 4
__lowerCamelCase = (16, 16)
__lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
return image
def __A ( self : List[Any] ) -> Tuple:
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=SCREAMING_SNAKE_CASE__ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=SCREAMING_SNAKE_CASE__ , only_cross_attention=SCREAMING_SNAKE_CASE__ , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
__lowerCamelCase = EulerDiscreteScheduler(prediction_type='''sample''' )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 ) -> str:
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__lowerCamelCase = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
__lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def __A ( self : str ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def __A ( self : List[Any] ) -> Optional[int]:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def __A ( self : Any ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __A ( self : Any ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def __A ( self : Any ) -> Any:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def __A ( self : Optional[Any] ) -> Optional[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def __A ( self : Tuple ) -> Dict:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = 2
__lowerCamelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# these schedulers are not supported by the latent upscale pipeline, so skip them
continue
__lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , scheduler_enum.name )
__lowerCamelCase = scheduler_cls.from_config(pipe.scheduler.config )
__lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ )[0]
outputs.append(SCREAMING_SNAKE_CASE__ )
assert check_same_shape(SCREAMING_SNAKE_CASE__ )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : List[Any] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : List[Any] ) -> Union[str, Any]:
__lowerCamelCase = torch.manual_seed(33 )
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
__lowerCamelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
__lowerCamelCase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
__lowerCamelCase = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='''latent''' ).images
__lowerCamelCase = upscaler(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , guidance_scale=0 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ).images[0]
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def __A ( self : Any ) -> Union[str, Any]:
__lowerCamelCase = torch.manual_seed(33 )
__lowerCamelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
__lowerCamelCase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
__lowerCamelCase = upscaler(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , guidance_scale=0 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ).images[0]
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 270 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__lowerCamelCase = Text(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : int ) -> Dict:
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
| 270 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __magic_name__ ( *__lowerCAmelCase : int ) -> str:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase = list(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
__lowerCamelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __magic_name__ ( __lowerCAmelCase : Exception ) -> bool:
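# Heuristic OOM detector: returns True when the exception message matches one of
# the known CUDA / cuDNN / CPU-allocator out-of-memory strings listed below.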
__lowerCamelCase = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __magic_name__ ( __lowerCAmelCase : callable = None , __lowerCAmelCase : int = 128 ) -> Tuple:
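# Decorator that calls the wrapped function with a batch size as its first
# argument, halving it after every out-of-memory failure until the call
# succeeds or the batch size reaches zero.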
if function is None:
return functools.partial(__lowerCAmelCase , starting_batch_size=__lowerCAmelCase )
__lowerCamelCase = starting_batch_size
def decorator(*__lowerCAmelCase : Dict , **__lowerCAmelCase : List[str] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__lowerCamelCase = list(inspect.signature(__lowerCAmelCase ).parameters.keys() )
# Guard against user error
if len(__lowerCAmelCase ) < (len(__lowerCAmelCase ) + 1):
__lowerCamelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
except Exception as e:
if should_reduce_batch_size(__lowerCAmelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
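# Illustrative usage (a hypothetical training function; the decorator supplies
# the current batch size as the first positional argument, so callers omit it):
#
#   @__magic_name__(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...  # an OOM raised here triggers a retry at 64, then 32, ...
#
#   train(model, dataloader)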
| 270 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 270 | 1 |
from __future__ import annotations
# Precision threshold for the ternary searches below: ranges smaller than this
# are handed off to linear search. Keeping it at 10 or above is recommended.
SCREAMING_SNAKE_CASE__ : Tuple = 10
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> int:
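# Plain linear scan of array[left:right); returns the index of target or -1.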
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
if array[i] == target:
return i
return -1
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> int:
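# Iterative ternary search over a sorted array: split the active range into
# thirds, keep the third that can still contain target, and fall back to the
# linear scan once the range is smaller than the precision threshold.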
__lowerCamelCase = 0
__lowerCamelCase = len(__lowerCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = (left + right) // 3 + 1
__lowerCamelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__lowerCamelCase = one_third - 1
elif array[two_third] < target:
__lowerCamelCase = two_third + 1
else:
__lowerCamelCase = one_third + 1
__lowerCamelCase = two_third - 1
else:
return -1
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> int:
if left < right:
if right - left < precision:
return lin_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = (left + right) // 3 + 1
__lowerCamelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__lowerCAmelCase , one_third - 1 , __lowerCAmelCase , __lowerCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , __lowerCAmelCase , __lowerCAmelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : List[str] = input("Enter numbers separated by comma:\n").strip()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
SCREAMING_SNAKE_CASE__ : str = int(input("Enter the number to be found in the list:\n").strip())
SCREAMING_SNAKE_CASE__ : int = ite_ternary_search(collection, target)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'Iterative search: {target} found at positions: {resulta}')
print(F'Recursive search: {target} found at positions: {resulta}')
else:
print("Not found")
| 270 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __A ( self : Dict ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.BETA , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Optional[Any]:
__lowerCamelCase = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCamelCase = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase = CHRF(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 270 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
a__ : Dict = """encoder-decoder"""
a__ : Optional[int] = True
def __init__( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__lowerCamelCase = kwargs.pop('''encoder''' )
__lowerCamelCase = encoder_config.pop('''model_type''' )
__lowerCamelCase = kwargs.pop('''decoder''' )
__lowerCamelCase = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
__lowerCamelCase = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = True
@classmethod
def __A ( cls : Tuple , SCREAMING_SNAKE_CASE__ : PretrainedConfig , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Dict ) -> PretrainedConfig:
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
__lowerCamelCase = True
__lowerCamelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Dict:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.encoder.to_dict()
__lowerCamelCase = self.decoder.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 270 | 1 |
from __future__ import annotations
from fractions import Fraction
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> bool:
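# A fraction num/den is "digit-cancelling" when num's units digit equals den's
# tens digit and naively cancelling that shared digit leaves an equal fraction,
# e.g. 49/98 == 4/8.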
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __magic_name__ ( __lowerCAmelCase : int ) -> list[str]:
__lowerCamelCase = []
__lowerCamelCase = 11
__lowerCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(__lowerCAmelCase , __lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
__lowerCamelCase = 10
return solutions
def __magic_name__ ( __lowerCAmelCase : int = 2 ) -> int:
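# Project Euler 33: multiply the non-trivial digit-cancelling fractions and
# return the denominator of their product in lowest terms (100 for two digits).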
__lowerCamelCase = 1.0
for fraction in fraction_list(__lowerCAmelCase ):
__lowerCamelCase = Fraction(__lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 270 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=18 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Any:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Dict = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Any = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : Tuple = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : int ) -> Optional[int]:
__lowerCamelCase = []
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
self.events.append('''on_init_end''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
self.events.append('''on_train_begin''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
self.events.append('''on_train_end''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.events.append('''on_step_begin''' )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
self.events.append('''on_step_end''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.events.append('''on_evaluate''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
self.events.append('''on_predict''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
self.events.append('''on_save''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
self.events.append('''on_log''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
self.events.append('''on_prediction_step''' )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
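# Replay the training schedule step by step to build the exact sequence of
# callback events a run with these arguments should emit.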
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 270 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` points at anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
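
# Usage sketch for the path helpers above (illustrative; the bucket name is made up):
#
#   >>> extract_path_from_uri("s3://my-bucket/data/train.json")
#   'my-bucket/data/train.json'
#   >>> extract_path_from_uri("/local/path/train.json")
#   '/local/path/train.json'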
| 270 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simplest and fastest version of image resizing (nearest-neighbour interpolation)."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        # Map a destination X coordinate back onto the source image
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        # Map a destination Y coordinate back onto the source image
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 270 | 1 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 270 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
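    # Sanity check (the cheapest E -> F route in the graphs above is E -> G -> F,
    # with total weight 3):
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))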
| 270 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
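
# Usage sketch (illustrative, not part of the module):
#
#   >>> config = DetrConfig(num_queries=50, d_model=128, encoder_attention_heads=4)
#   >>> config.hidden_size  # aliased to `d_model` via `attribute_map`
#   128
#   >>> config.num_attention_heads  # aliased to `encoder_attention_heads`
#   4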
| 270 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
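

if __name__ == "__main__":
    # A tiny sanity check (illustrative): the MST of this triangle keeps the two
    # cheaper edges, so the parent map should read {'a': None, 'b': 'a', 'c': 'b'}.
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(graph)
    print(parent)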
| 270 | 1 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 270 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and which has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    # Turnaround time is the sum of burst time and waiting time.
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 270 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
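
# Usage sketch (illustrative; assumes the `microsoft/layoutlmv3-base` checkpoint and
# a local document image are available):
#
#   from transformers import AutoProcessor
#   from PIL import Image
#
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
#   encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#   print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values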
| 270 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
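
# A minimal subclass sketch (the "hello" command is invented for illustration;
# `parser` is the sub-parsers object the CLI entry point hands to
# `register_subcommand`, which is why `add_parser` is available on it):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")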
| 270 | 1 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 270 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 270 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 270 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
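
# For reference: with the default digit length, the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98, and
# solution() evaluates to 100.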
| 270 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ) -> Any:
__lowerCamelCase = checkpoint
__lowerCamelCase = {}
__lowerCamelCase = vae_state_dict['''encoder.conv_in.weight''']
__lowerCamelCase = vae_state_dict['''encoder.conv_in.bias''']
__lowerCamelCase = vae_state_dict['''encoder.conv_out.weight''']
__lowerCamelCase = vae_state_dict['''encoder.conv_out.bias''']
__lowerCamelCase = vae_state_dict['''encoder.norm_out.weight''']
__lowerCamelCase = vae_state_dict['''encoder.norm_out.bias''']
__lowerCamelCase = vae_state_dict['''decoder.conv_in.weight''']
__lowerCamelCase = vae_state_dict['''decoder.conv_in.bias''']
__lowerCamelCase = vae_state_dict['''decoder.conv_out.weight''']
__lowerCamelCase = vae_state_dict['''decoder.conv_out.bias''']
__lowerCamelCase = vae_state_dict['''decoder.norm_out.weight''']
__lowerCamelCase = vae_state_dict['''decoder.norm_out.bias''']
__lowerCamelCase = vae_state_dict['''quant_conv.weight''']
__lowerCamelCase = vae_state_dict['''quant_conv.bias''']
__lowerCamelCase = vae_state_dict['''post_quant_conv.weight''']
__lowerCamelCase = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
__lowerCamelCase = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
__lowerCamelCase = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__lowerCAmelCase )
}
# Retrieves the keys for the decoder up blocks only
__lowerCamelCase = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
__lowerCamelCase = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__lowerCAmelCase )
}
for i in range(__lowerCAmelCase ):
__lowerCamelCase = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
__lowerCamelCase = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
__lowerCamelCase = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
__lowerCamelCase = renew_vae_resnet_paths(__lowerCAmelCase )
__lowerCamelCase = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
__lowerCamelCase = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
__lowerCamelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__lowerCamelCase = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
__lowerCamelCase = renew_vae_resnet_paths(__lowerCAmelCase )
__lowerCamelCase = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
__lowerCamelCase = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
__lowerCamelCase = renew_vae_attention_paths(__lowerCAmelCase )
__lowerCamelCase = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
conv_attn_to_linear(__lowerCAmelCase )
for i in range(__lowerCAmelCase ):
__lowerCamelCase = num_up_blocks - 1 - i
__lowerCamelCase = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
__lowerCamelCase = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
__lowerCamelCase = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
__lowerCamelCase = renew_vae_resnet_paths(__lowerCAmelCase )
__lowerCamelCase = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
__lowerCamelCase = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
__lowerCamelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__lowerCamelCase = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
__lowerCamelCase = renew_vae_resnet_paths(__lowerCAmelCase )
__lowerCamelCase = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
__lowerCamelCase = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
__lowerCamelCase = renew_vae_attention_paths(__lowerCAmelCase )
__lowerCamelCase = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
conv_attn_to_linear(__lowerCAmelCase )
return new_checkpoint
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , ) -> List[str]:
# Only support V1
    __lowerCamelCase = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
__lowerCamelCase = io.BytesIO(r.content )
__lowerCamelCase = OmegaConf.load(__lowerCAmelCase )
__lowerCamelCase = 512
__lowerCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
__lowerCamelCase = {}
with safe_open(__lowerCAmelCase , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
__lowerCamelCase = f.get_tensor(__lowerCAmelCase )
else:
__lowerCamelCase = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )['''state_dict''']
# Convert the VAE model.
__lowerCamelCase = create_vae_diffusers_config(__lowerCAmelCase , image_size=__lowerCAmelCase )
__lowerCamelCase = custom_convert_ldm_vae_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = AutoencoderKL(**__lowerCAmelCase )
vae.load_state_dict(__lowerCAmelCase )
vae.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Optional[int] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
class lowerCAmelCase__ :
def __init__( self : int , SCREAMING_SNAKE_CASE__ : list ) -> None:
__lowerCamelCase = set_counts
__lowerCamelCase = max(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = len(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = [1] * num_sets
__lowerCamelCase = list(range(SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> bool:
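        # Union by rank: attach the lower-rank root under the higher-rank one,
        # fold the element counts together, and track the largest set seen.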
__lowerCamelCase = self.get_parent(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_parent(SCREAMING_SNAKE_CASE__ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__lowerCamelCase = 0
__lowerCamelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__lowerCamelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__lowerCamelCase = 0
__lowerCamelCase = src_parent
__lowerCamelCase = self.set_counts[src_parent]
__lowerCamelCase = max(self.max_set , SCREAMING_SNAKE_CASE__ )
return True
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int:
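        # Find the root of the set, compressing the path as the recursion unwinds.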
if self.parents[disj_set] == disj_set:
return disj_set
__lowerCamelCase = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 270 |
import pprint
import requests
SCREAMING_SNAKE_CASE__ : str = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = random_quotes()
pprint.pprint(response)
| 270 | 1 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Dict = re.compile(r"([A-Z]+)([A-Z][a-z])")
SCREAMING_SNAKE_CASE__ : int = re.compile(r"([a-z\d])([A-Z])")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(r"(?<!_)_(?!_)")
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(r"(_{2,})")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = r"^\w+(\.\w+)*$"
SCREAMING_SNAKE_CASE__ : Optional[int] = r"<>:/\|?*"
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Union[str, Any]:
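    # Insert underscores at case boundaries, then lowercase:
    # "CamelCase" -> "camel_case".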
__lowerCamelCase = _uppercase_uppercase_re.sub(R'''\1_\2''' , __lowerCAmelCase )
__lowerCamelCase = _lowercase_uppercase_re.sub(R'''\1_\2''' , __lowerCAmelCase )
return name.lower()
def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Optional[Any]:
__lowerCamelCase = _single_underscore_re.split(__lowerCAmelCase )
__lowerCamelCase = [_multiple_underscores_re.split(__lowerCAmelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__lowerCAmelCase ) if n != '''''' )
def __magic_name__ ( __lowerCAmelCase : Optional[int] ) -> List[str]:
if os.path.basename(__lowerCAmelCase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> Tuple:
if os.path.basename(__lowerCAmelCase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , __lowerCAmelCase ):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(__lowerCAmelCase )}-{split}'''
def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=None ) -> Optional[int]:
__lowerCamelCase = filename_prefix_for_split(__lowerCAmelCase , __lowerCAmelCase )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
__lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
return f'''{filepath}*'''
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Union[str, Any]=None ) -> Union[str, Any]:
__lowerCamelCase = filename_prefix_for_split(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if shard_lengths:
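        # One file per shard, e.g. "<prefix>-00000-of-00042.<suffix>".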
__lowerCamelCase = len(__lowerCAmelCase )
__lowerCamelCase = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(__lowerCAmelCase )]
if filetype_suffix:
__lowerCamelCase = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
__lowerCamelCase = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
| 270 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
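    # Möbius function: 1 for square-free n with an even number of prime factors,
    # -1 for an odd number of prime factors, and 0 if n has a squared factor.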
__lowerCamelCase = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : str = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : str = GPTSwaTokenizer
a__ : Optional[Any] = False
a__ : List[Any] = True
a__ : int = False
def __A ( self : Dict ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
__lowerCamelCase = '''This is a test'''
__lowerCamelCase = '''This is a test'''
return input_text, output_text
def __A ( self : str ) -> str:
__lowerCamelCase = '''<s>'''
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 20_00 )
def __A ( self : Union[str, Any] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def __A ( self : Any ) -> List[str]:
__lowerCamelCase = GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__lowerCamelCase = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertListEqual(tokenizer.encode_fast(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(tokenizer.decode_fast(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__lowerCamelCase = {'''input_ids''': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=SCREAMING_SNAKE_CASE__ , )
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
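    # Token-level F1 between gold and predicted answers: precision and recall
    # are computed over the multiset intersection of their normalized tokens.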
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
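    # Walk predictions from most to least confident (lowest no-answer probability
    # first), accumulating precision/recall points and the average precision.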
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
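    # Sweep candidate no-answer thresholds in order of increasing no-answer
    # probability and keep the one that maximizes the aggregate score.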
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : int = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE__ : List[Any] = 50_003
SCREAMING_SNAKE_CASE__ : str = 50_002
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : List[str] = PLBartTokenizer
a__ : List[str] = None
a__ : List[str] = False
def __A ( self : List[Any] ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes='''base''' , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : int ) -> Optional[Any]:
__lowerCamelCase = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes='''base''' , keep_accents=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 4 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__lowerCamelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
def __A ( self : Tuple ) -> Union[str, Any]:
__lowerCamelCase = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes='''multi''' , keep_accents=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 7 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__lowerCamelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
a__ : int = """uclanlp/plbart-python-en_XX"""
a__ : Optional[Any] = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
a__ : Any = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
a__ : Any = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def __A ( cls : Dict ) -> Tuple:
__lowerCamelCase = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
__lowerCamelCase = 1
return cls
def __A ( self : str ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def __A ( self : Any ) -> Dict:
__lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
def __A ( self : Tuple ) -> Optional[Any]:
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
__lowerCamelCase = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
__lowerCamelCase = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[str] ) -> List[str]:
__lowerCamelCase = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = 10
__lowerCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def __A ( self : List[Any] ) -> int:
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = PLBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ )
@require_torch
def __A ( self : Optional[Any] ) -> List[str]:
__lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
__lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __A ( self : Any ) -> List[Any]:
__lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __A ( self : Any ) -> Dict:
__lowerCamelCase = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors='''pt''' )
__lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=10 , return_tensors='''pt''' )
__lowerCamelCase = targets['''input_ids''']
__lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __A ( self : Any ) -> int:
__lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
| 270 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Optional[int]:
__lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__lowerCamelCase = experiments[experiment_id]
__lowerCamelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__lowerCamelCase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data['''extra_args_str'''] )
__lowerCamelCase = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {os.path.basename(SCREAMING_SNAKE_CASE__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : str ) -> Tuple[int, float]:
__lowerCamelCase = '''--skip_memory_metrics 0'''
__lowerCamelCase = self.run_trainer(
max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
__lowerCamelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
__lowerCamelCase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 | 1 |
import argparse
import hashlib # hashlib is only used inside the test function below
import struct
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
__lowerCamelCase = data
__lowerCamelCase = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
return ((n << b) | (n >> (32 - b))) & 0XFF_FF_FF_FF
def __A ( self : Optional[Any] ) -> Any:
__lowerCamelCase = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64)
__lowerCamelCase = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def __A ( self : Optional[int] ) -> List[str]:
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
__lowerCamelCase = list(struct.unpack('''>16L''' , SCREAMING_SNAKE_CASE__ ) ) + [0] * 64
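# standard SHA-1 message schedule: extend the 16 input words to 80 by XOR-ing earlier words and rotating left by one bit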
for i in range(16 , 80 ):
__lowerCamelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.padding()
__lowerCamelCase = self.split_blocks()
for block in self.blocks:
__lowerCamelCase = self.expand_block(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
__lowerCamelCase = (b & c) | ((~b) & d)
__lowerCamelCase = 0X5A_82_79_99
elif 20 <= i < 40:
__lowerCamelCase = b ^ c ^ d
__lowerCamelCase = 0X6E_D9_EB_A1
elif 40 <= i < 60:
__lowerCamelCase = (b & c) | (b & d) | (c & d)
__lowerCamelCase = 0X8F_1B_BC_DC
elif 60 <= i < 80:
__lowerCamelCase = b ^ c ^ d
__lowerCamelCase = 0XCA_62_C1_D6
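# one SHA-1 round: a <- rotl5(a) + f + e + k + w[i], then (b, c, d, e) <- (a, rotl30(b), c, d) using the old values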
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = (
self.rotate(SCREAMING_SNAKE_CASE__ , 5 ) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
a,
self.rotate(SCREAMING_SNAKE_CASE__ , 30 ),
c,
d,
)
__lowerCamelCase = (
self.h[0] + a & 0XFF_FF_FF_FF,
self.h[1] + b & 0XFF_FF_FF_FF,
self.h[2] + c & 0XFF_FF_FF_FF,
self.h[3] + d & 0XFF_FF_FF_FF,
self.h[4] + e & 0XFF_FF_FF_FF,
)
return ("{:08x}" * 5).format(*self.h )
def __magic_name__ ( ) -> Union[str, Any]:
__lowerCamelCase = B'''Test String'''
assert SHAaHash(__lowerCAmelCase ).final_hash() == hashlib.shaa(__lowerCAmelCase ).hexdigest() # noqa: S324
def __magic_name__ ( ) -> List[Any]:
__lowerCamelCase = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
__lowerCamelCase = f.read()
else:
__lowerCamelCase = bytes(__lowerCAmelCase , '''utf-8''' )
print(SHAaHash(__lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 270 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
a__ : Any = """mask2former"""
a__ : Dict = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 20_48 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_55 , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_25_44 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> str:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = backbone_config.pop('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any ) -> Dict[str, any]:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 270 | 1 |
from __future__ import annotations
from cmath import sqrt
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> tuple[complex, complex]:
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
__lowerCamelCase = b * b - 4 * a * c
__lowerCamelCase = (-b + sqrt(__lowerCAmelCase )) / (2 * a)
__lowerCamelCase = (-b - sqrt(__lowerCAmelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
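# Worked example (illustrative): for 5x^2 + 6x + 1 the discriminant is 6*6 - 4*5*1 = 16,
# so the roots are (-6 ± 4) / 10, i.e. -0.2 and -1.0, matching the demo in main() below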
def __magic_name__ ( ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(f'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 270 |
import os
def __magic_name__ ( ) -> str:
__lowerCamelCase = os.path.join(os.path.dirname(__lowerCAmelCase ) , '''num.txt''' )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 270 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowerCAmelCase__ :
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=13 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=32 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=37 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , ) -> Union[str, Any]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = projection_dim
def __A ( self : List[Any] ) -> Tuple:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
__lowerCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
__lowerCamelCase = TFDPRContextEncoder(config=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
__lowerCamelCase = TFDPRQuestionEncoder(config=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
__lowerCamelCase = TFDPRReader(config=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __A ( self : Optional[Any] ) -> str:
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : str = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
a__ : Dict = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
a__ : Optional[int] = False
a__ : Dict = False
a__ : Union[str, Any] = False
a__ : Tuple = False
a__ : str = False
def __A ( self : Optional[int] ) -> int:
__lowerCamelCase = TFDPRModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __A ( self : List[Any] ) -> int:
self.config_tester.run_common_tests()
def __A ( self : int ) -> List[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Tuple ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] ) -> str:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : Tuple ) -> Optional[Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : int ) -> Tuple:
__lowerCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
__lowerCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__lowerCamelCase = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 270 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str
a__ : List[str]
a__ : Optional[List[str]]
@dataclass
class lowerCAmelCase__ :
a__ : List[int]
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
class lowerCAmelCase__ ( __lowercase ):
a__ : Optional[Any] = """train"""
a__ : Optional[int] = """dev"""
a__ : Dict = """test"""
class lowerCAmelCase__ :
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : List[InputExample] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=-1_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[InputFeatures]:
__lowerCamelCase = {label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}
__lowerCamelCase = []
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE__ ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = []
__lowerCamelCase = []
for word, label in zip(example.words , example.labels ):
__lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE__ ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE__ ) - 1) )
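# e.g. (illustrative) the word "jacksonville" labelled B-LOC may tokenize to ["jack", "##son", "##ville"],
# producing label_ids [label_map["B-LOC"], pad_token_label_id, pad_token_label_id]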
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__lowerCamelCase = tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE__ ) > max_seq_length - special_tokens_count:
__lowerCamelCase = tokens[: (max_seq_length - special_tokens_count)]
__lowerCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCamelCase = [sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCamelCase = [cls_token] + tokens
__lowerCamelCase = [pad_token_label_id] + label_ids
__lowerCamelCase = [cls_token_segment_id] + segment_ids
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCamelCase = [1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE__ )
# Zero-pad up to the sequence length.
__lowerCamelCase = max_seq_length - len(SCREAMING_SNAKE_CASE__ )
if pad_on_left:
__lowerCamelCase = ([pad_token] * padding_length) + input_ids
__lowerCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase__ ( __lowercase ):
a__ : List[InputFeatures]
a__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> Union[str, Any]:
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) ) , )
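# e.g. (illustrative) the train split with a BertTokenizer and max_seq_length=128 is cached as "cached_train_BertTokenizer_128"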
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
__lowerCamelCase = torch.load(SCREAMING_SNAKE_CASE__ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
def __len__( self : Dict ) -> str:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase__ :
a__ : List[InputFeatures]
a__ : int = -100
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> List[Any]:
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Any:
return len(self.features )
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
| 270 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = False, False, False
@dataclass
class lowerCAmelCase__ :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=__lowercase , repr=__lowercase )
def __call__( self : Optional[int] ) -> List[Any]:
return self.pa_type
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return {"bytes": None, "path": value}
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__lowerCamelCase = BytesIO()
sf.write(SCREAMING_SNAKE_CASE__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If the PCM bytes are already available, use them directly instead of re-reading the file
__lowerCamelCase = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__lowerCamelCase = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67
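# int16 PCM samples span [-32768, 32767]; dividing by 32767 rescales them to floats in roughly [-1.0, 1.0]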
__lowerCamelCase = BytesIO(bytes() )
sf.write(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
__lowerCamelCase , __lowerCamelCase = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
__lowerCamelCase = xsplitext(SCREAMING_SNAKE_CASE__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
__lowerCamelCase = token_per_repo_id or {}
__lowerCamelCase = path.split('''::''' )[-1]
try:
__lowerCamelCase = string_to_dict(SCREAMING_SNAKE_CASE__ , config.HUB_DATASETS_URL )['''repo_id''']
__lowerCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__lowerCamelCase = None
with xopen(SCREAMING_SNAKE_CASE__ , '''rb''' , use_auth_token=SCREAMING_SNAKE_CASE__ ) as f:
__lowerCamelCase , __lowerCamelCase = sf.read(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase , __lowerCamelCase = sf.read(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = array.T
if self.mono:
__lowerCamelCase = librosa.to_mono(SCREAMING_SNAKE_CASE__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__lowerCamelCase = librosa.resample(SCREAMING_SNAKE_CASE__ , orig_sr=SCREAMING_SNAKE_CASE__ , target_sr=self.sampling_rate )
__lowerCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __A ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__lowerCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE__ ) , type=pa.binary() )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowerCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE__ ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
__lowerCamelCase = pa.array([Audio().encode_example(SCREAMING_SNAKE_CASE__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
__lowerCamelCase = storage.field('''bytes''' )
else:
__lowerCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE__ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
__lowerCamelCase = storage.field('''path''' )
else:
__lowerCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE__ ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(SCREAMING_SNAKE_CASE__ , self.pa_type )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE__ : List[Any] ):
with xopen(SCREAMING_SNAKE_CASE__ , '''rb''' ) as f:
__lowerCamelCase = f.read()
return bytes_
__lowerCamelCase = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowerCamelCase = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE__ , self.pa_type )
| 270 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__lowerCamelCase = Text(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : int ) -> Dict:
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
| 270 | 1 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( __lowercase ):
a__ : Any = ["""image_processor""", """tokenizer"""]
a__ : Any = """AutoImageProcessor"""
a__ : Union[str, Any] = """AutoTokenizer"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.image_processor
__lowerCamelCase = False
def __call__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = kwargs.pop('''images''' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = kwargs.pop('''text''' , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__lowerCamelCase = args[0]
__lowerCamelCase = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__lowerCamelCase = self.image_processor(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None:
__lowerCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase = encodings['''input_ids''']
return inputs
def __A ( self : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@contextmanager
def __A ( self : List[str] ) -> Union[str, Any]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call).''' )
__lowerCamelCase = True
__lowerCamelCase = self.tokenizer
yield
__lowerCamelCase = self.image_processor
__lowerCamelCase = False
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Tuple:
if added_vocab is None:
__lowerCamelCase = self.tokenizer.get_added_vocab()
__lowerCamelCase = {}
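# e.g. (hypothetical input) "<s_menu><s_name>Latte</s_name></s_menu>" is parsed by the loop
# below into {"menu": {"name": "Latte"}}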
while tokens:
__lowerCamelCase = re.search(R'''<s_(.*?)>''' , SCREAMING_SNAKE_CASE__ , re.IGNORECASE )
if start_token is None:
break
__lowerCamelCase = start_token.group(1 )
__lowerCamelCase = re.search(Rf'''</s_{key}>''' , SCREAMING_SNAKE_CASE__ , re.IGNORECASE )
__lowerCamelCase = start_token.group()
if end_token is None:
__lowerCamelCase = tokens.replace(SCREAMING_SNAKE_CASE__ , '''''' )
else:
__lowerCamelCase = end_token.group()
__lowerCamelCase = re.escape(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = re.escape(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , SCREAMING_SNAKE_CASE__ , re.IGNORECASE )
if content is not None:
__lowerCamelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCamelCase = self.tokenajson(SCREAMING_SNAKE_CASE__ , is_inner_value=SCREAMING_SNAKE_CASE__ , added_vocab=SCREAMING_SNAKE_CASE__ )
if value:
if len(SCREAMING_SNAKE_CASE__ ) == 1:
__lowerCamelCase = value[0]
__lowerCamelCase = value
else: # leaf nodes
__lowerCamelCase = []
for leaf in content.split(R'''<sep/>''' ):
__lowerCamelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCamelCase = leaf[1:-2] # for categorical special tokens
output[key].append(SCREAMING_SNAKE_CASE__ )
if len(output[key] ) == 1:
__lowerCamelCase = output[key][0]
__lowerCamelCase = tokens[tokens.find(SCREAMING_SNAKE_CASE__ ) + len(SCREAMING_SNAKE_CASE__ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=SCREAMING_SNAKE_CASE__ , added_vocab=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __A ( self : Optional[Any] ) -> Dict:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def __A ( self : Any ) -> Union[str, Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
| 270 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 270 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( __lowercase ):
a__ : List[str] = """philschmid/bart-large-cnn-samsum"""
a__ : Optional[int] = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
a__ : Union[str, Any] = """summarizer"""
a__ : Dict = AutoTokenizer
a__ : Any = AutoModelForSeqaSeqLM
a__ : Tuple = ["""text"""]
a__ : Union[str, Any] = ["""text"""]
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
return self.pre_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE__ )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
return self.model.generate(**SCREAMING_SNAKE_CASE__ )[0]
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
return self.pre_processor.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
| 270 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __A ( self : Dict ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.BETA , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Optional[Any]:
__lowerCamelCase = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
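# transpose from one sub-list of references per prediction to one list per reference position,
# e.g. [["r1a", "r1b"], ["r2a", "r2b"]] -> [["r1a", "r2a"], ["r1b", "r2b"]], as sacrebleu expects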
__lowerCamelCase = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase = CHRF(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 270 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> List[Any]:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
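# Self-contained sketch of the pop-and-reinsert rename performed by the helper
# above (invoked as `rename_key` at the call sites below): the old key is
# removed and its value re-stored under the new key, in place.
def _rename_key_sketch(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)
_toy_state = {"transformer.encoder.layers.0.linear1.weight": 0}
_rename_key_sketch(_toy_state, "transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight")
assert _toy_state == {"encoder.layers.0.fc1.weight": 0}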
def __magic_name__ ( __lowerCAmelCase : Any ) -> Dict:
__lowerCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowerCamelCase = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__lowerCamelCase = value
else:
__lowerCamelCase = value
return new_state_dict
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]:
__lowerCamelCase = ''''''
if is_panoptic:
__lowerCamelCase = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[:256, :]
__lowerCamelCase = in_proj_bias[:256]
__lowerCamelCase = in_proj_weight[256:512, :]
__lowerCamelCase = in_proj_bias[256:512]
__lowerCamelCase = in_proj_weight[-256:, :]
__lowerCamelCase = in_proj_bias[-256:]
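# Shape sketch for the slicing above (a hidden size of 256 is assumed from the
# slice bounds): PyTorch fuses q, k and v into a single (3*256, 256) in_proj
# matrix; rows [0:256] form the query projection, rows [256:512] the key
# projection, and the last 256 rows the value projection.
_fused_example = torch.zeros(3 * 256, 256)
_q_w, _k_w, _v_w = _fused_example[:256, :], _fused_example[256:512, :], _fused_example[-256:, :]
assert _q_w.shape == _k_w.shape == _v_w.shape == (256, 256)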
def __magic_name__ ( ) -> Union[str, Any]:
__lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Optional[int]:
__lowerCamelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowerCamelCase = '''resnet101'''
if "dc5" in model_name:
__lowerCamelCase = True
__lowerCamelCase = '''panoptic''' in model_name
if is_panoptic:
__lowerCamelCase = 250
else:
__lowerCamelCase = 91
__lowerCamelCase = '''huggingface/label-files'''
__lowerCamelCase = '''coco-detection-id2label.json'''
__lowerCamelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__lowerCamelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
# load image processor
__lowerCamelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowerCamelCase = ConditionalDetrImageProcessor(format=__lowerCAmelCase )
# prepare image
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' )
__lowerCamelCase = encoding['''pixel_values''']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
__lowerCamelCase = torch.hub.load('''DeppMeng/ConditionalDETR''' , __lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
__lowerCamelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowerCamelCase = '''conditional_detr.''' + src
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase , is_panoptic=__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCamelCase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
# finally, create HuggingFace model and load state dict
__lowerCamelCase = ConditionalDetrForSegmentation(__lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
model.push_to_hub(repo_id=__lowerCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__lowerCamelCase = conditional_detr(__lowerCAmelCase )
__lowerCamelCase = model(__lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
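# Minimal standalone sketch of the lazy pattern wired up above (simplified; not
# the real _LazyModule): attribute lookups resolve to the owning submodule on
# first access, so importing the package stays cheap until a symbol is touched.
import importlib
class _TinyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(submodule, attr)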
| 270 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
SCREAMING_SNAKE_CASE__ : Dict = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"facebook/blenderbot-3B": 128}
class lowerCAmelCase__ ( __lowercase ):
a__ : List[str] = VOCAB_FILES_NAMES
a__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Dict = ["""input_ids""", """attention_mask"""]
a__ : str = BlenderbotTokenizer
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Tuple="replace" , SCREAMING_SNAKE_CASE__ : Dict="<s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<pad>" , SCREAMING_SNAKE_CASE__ : str="<mask>" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
__lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
__lowerCamelCase = add_prefix_space
__lowerCamelCase = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = add_prefix_space
__lowerCamelCase = '''post_processor'''
__lowerCamelCase = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
__lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__lowerCamelCase = tuple(state['''sep'''] )
if "cls" in state:
__lowerCamelCase = tuple(state['''cls'''] )
__lowerCamelCase = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
__lowerCamelCase = add_prefix_space
__lowerCamelCase = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
__lowerCamelCase = trim_offsets
__lowerCamelCase = True
if changes_to_apply:
__lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
__lowerCamelCase = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self : Tuple ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self : int , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
__lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
__lowerCamelCase = value
def __A ( self : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> BatchEncoding:
__lowerCamelCase = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __A ( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : str ) -> BatchEncoding:
__lowerCamelCase = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
__lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> Tuple:
return token_ids_a + [self.eos_token_id]
def __A ( self : str , SCREAMING_SNAKE_CASE__ : "Conversation" ) -> List[int]:
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
                # Generated responses already contain the leading space.
inputs.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
__lowerCamelCase = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
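# Standalone illustration of the flattening above (hypothetical turns): user
# turns receive a leading space, generated turns are kept verbatim, and the
# joined text is what gets encoded (then trimmed to the last model_max_length ids).
_example_turns = [(True, "hello"), (False, " hi there"), (True, "how are you?")]
_example_flat = " ".join((" " + text) if is_user else text for is_user, text in _example_turns)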
| 270 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=18 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Any:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Dict = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test non-batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
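        # Worked number under the tester defaults above (16x16 patches, 3
        # channels): 16 * 16 * 3 + 2 = 770, where the +2 accounts for the
        # (row, col) patch coordinates prepended to each flattened patch.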
for max_patch in self.image_processor_tester.max_patches:
            # Test non-batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test non-batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
            # Test non-batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
            # Test non-batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
        # Test non-batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
            # Test non-batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Any = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test non-batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
            # Test non-batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 | 1 |
from __future__ import annotations
def __magic_name__ ( __lowerCAmelCase : list[int] ) -> int:
if not nums:
return 0
__lowerCamelCase = nums[0]
__lowerCamelCase = 0
for num in nums[1:]:
__lowerCamelCase , __lowerCamelCase = (
max_excluding + num,
max(__lowerCAmelCase , __lowerCAmelCase ),
)
return max(__lowerCAmelCase , __lowerCAmelCase )
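# Worked example of the recurrence above (hand-checked): for [3, 2, 5, 10, 7]
# the best non-adjacent sum is 3 + 5 + 7 = 15. The pair tracks the best sum
# including the current element and the best sum excluding it; the answer is
# the larger of the two.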
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : int ) -> Optional[int]:
__lowerCamelCase = []
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
self.events.append('''on_init_end''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
self.events.append('''on_train_begin''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
self.events.append('''on_train_end''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.events.append('''on_step_begin''' )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
self.events.append('''on_step_end''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.events.append('''on_evaluate''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
self.events.append('''on_predict''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
self.events.append('''on_save''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
self.events.append('''on_log''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
self.events.append('''on_prediction_step''' )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
        # disable_tqdm in TrainingArguments has a flaky default, since it depends on the logging level. We make sure
        # it's set to False, since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
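        # Reading of the loop above: one on_epoch_begin/on_epoch_end pair per
        # epoch and one on_step_begin/on_step_end pair per optimizer step, with
        # on_log, evaluation and on_save events interleaved according to
        # logging_steps, eval_steps/evaluation_strategy and save_steps.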
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
        # XXX: for now, ignore scatter_gather warnings in this test, since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
        # A warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
| 270 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : List[Any]=32 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : List[str]=37 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_12 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]="None" , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Union[str, Any]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = relative_attention
__lowerCamelCase = position_biased_input
__lowerCamelCase = pos_att_type
__lowerCamelCase = scope
def __A ( self : Any ) -> str:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Any ) -> List[str]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self : Dict ) -> Dict:
__lowerCamelCase = self.get_config()
__lowerCamelCase = 3_00
return config
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
__lowerCamelCase = DebertaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )[0]
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )[0]
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
__lowerCamelCase = DebertaForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
__lowerCamelCase = self.num_labels
__lowerCamelCase = DebertaForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
__lowerCamelCase = self.num_labels
__lowerCamelCase = DebertaForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ) -> Any:
__lowerCamelCase = DebertaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : str ) -> str:
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : Optional[int] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ : Union[str, Any] = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Dict = True
a__ : Dict = False
a__ : str = False
a__ : str = False
a__ : int = False
def __A ( self : List[str] ) -> Tuple:
__lowerCamelCase = DebertaModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __A ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
def __A ( self : Optional[int] ) -> Union[str, Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*SCREAMING_SNAKE_CASE__ )
def __A ( self : List[str] ) -> Optional[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> List[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> Optional[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Dict:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : Tuple ) -> Optional[int]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = DebertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def __A ( self : Dict ) -> List[str]:
pass
@slow
def __A ( self : Optional[Any] ) -> Optional[Any]:
__lowerCamelCase = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
__lowerCamelCase = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__lowerCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
# compare the actual values for a slice.
__lowerCamelCase = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 270 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Dict:
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
__lowerCamelCase = img
__lowerCamelCase = img.shape[1]
__lowerCamelCase = img.shape[0]
__lowerCamelCase = dst_width
__lowerCamelCase = dst_height
__lowerCamelCase = self.src_w / self.dst_w
__lowerCamelCase = self.src_h / self.dst_h
__lowerCamelCase = __lowerCamelCase = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def __A ( self : List[Any] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
__lowerCamelCase = self.img[self.get_y(SCREAMING_SNAKE_CASE__ )][self.get_x(SCREAMING_SNAKE_CASE__ )]
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_x * x )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_y * y )
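
# Worked example (illustrative, not from the source): shrinking a 1000x1000
# image to 800x600 gives ratio_x = 1000 / 800 = 1.25 and
# ratio_y = 1000 / 600 (about 1.667), so output pixel (row 60, column 100)
# samples source pixel (row int(1.667 * 60) == 100, column int(1.25 * 100) == 125).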
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 800, 600
SCREAMING_SNAKE_CASE__ : int = imread("image_data/lena.jpg", 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : set ) -> int:
__lowerCamelCase , __lowerCamelCase = len(__lowerCAmelCase ), len(grid[0] )
if (
min(__lowerCAmelCase , __lowerCAmelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__lowerCamelCase = 0
count += depth_first_search(__lowerCAmelCase , row + 1 , __lowerCAmelCase , __lowerCAmelCase )
count += depth_first_search(__lowerCAmelCase , row - 1 , __lowerCAmelCase , __lowerCAmelCase )
count += depth_first_search(__lowerCAmelCase , __lowerCAmelCase , col + 1 , __lowerCAmelCase )
count += depth_first_search(__lowerCAmelCase , __lowerCAmelCase , col - 1 , __lowerCAmelCase )
visit.remove((row, col) )
return count
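
# Illustrative check (not part of the source): on the open 2x2 grid
# [[0, 0], [0, 0]], calling the function above with row=0, col=0 and an empty
# visit set returns 2: the right-then-down and down-then-right paths to (1, 1).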
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( __lowerCAmelCase : dict , __lowerCAmelCase : str , __lowerCAmelCase : set , __lowerCAmelCase : set , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : PriorityQueue , __lowerCAmelCase : dict , __lowerCAmelCase : float | int , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowerCamelCase = cst_fwd.get(__lowerCAmelCase , np.inf )
__lowerCamelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowerCamelCase = new_cost_f
__lowerCamelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowerCamelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict , __lowerCAmelCase : dict ) -> int:
__lowerCamelCase = -1
__lowerCamelCase = set()
__lowerCamelCase = set()
__lowerCamelCase = {source: 0}
__lowerCamelCase = {destination: 0}
__lowerCamelCase = {source: None}
__lowerCamelCase = {destination: None}
__lowerCamelCase = PriorityQueue()
__lowerCamelCase = PriorityQueue()
__lowerCamelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__lowerCamelCase , __lowerCamelCase = queue_forward.get()
visited_forward.add(__lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = queue_backward.get()
visited_backward.add(__lowerCAmelCase )
__lowerCamelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
__lowerCamelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__lowerCamelCase = shortest_distance
return shortest_path_distance
SCREAMING_SNAKE_CASE__ : List[Any] = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
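# Illustrative (not part of the source): with the adjacency dicts above, the
# shortest E -> F distance is 3, reached via E -> G -> F (2 + 1); the forward
# and backward searches meet before the longer E -> B -> C -> D -> F path
# (total cost 4) is fully expanded.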
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : List[Any]=18 , SCREAMING_SNAKE_CASE__ : Optional[int]=30 , SCREAMING_SNAKE_CASE__ : Dict=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[int]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=False , ) -> Dict:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = do_center_crop
__lowerCamelCase = crop_size
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean
__lowerCamelCase = image_std
__lowerCamelCase = do_reduce_labels
def __A ( self : Tuple ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __magic_name__ ( ) -> Any:
__lowerCamelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__lowerCamelCase = Image.open(dataset[0]['''file'''] )
__lowerCamelCase = Image.open(dataset[1]['''file'''] )
return image, map
def __magic_name__ ( ) -> Tuple:
__lowerCamelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__lowerCamelCase = Image.open(ds[0]['''file'''] )
__lowerCamelCase = Image.open(ds[1]['''file'''] )
__lowerCamelCase = Image.open(ds[2]['''file'''] )
__lowerCamelCase = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : int = BeitImageProcessor if is_vision_available() else None
def __A ( self : Tuple ) -> int:
__lowerCamelCase = BeitImageProcessingTester(self )
@property
def __A ( self : Dict ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : int ) -> Union[str, Any]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''center_crop''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) )
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] ) -> Dict:
pass
def __A ( self : List[Any] ) -> Tuple:
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __A ( self : int ) -> Dict:
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __A ( self : int ) -> str:
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __A ( self : str ) -> Optional[int]:
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = []
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
__lowerCamelCase , __lowerCamelCase = prepare_semantic_single_inputs()
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
__lowerCamelCase , __lowerCamelCase = prepare_semantic_batch_inputs()
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def __A ( self : str ) -> int:
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__lowerCamelCase , __lowerCamelCase = prepare_semantic_single_inputs()
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
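        # Enabling `do_reduce_labels` below makes the processor replace the
        # background label 0 with 255 and shift every other label down by one,
        # which is why the maximum can now reach 255.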
__lowerCamelCase = True
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 270 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (position - 1) // 2
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 1
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 2
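
# Index arithmetic example (illustrative): the heap lives in a flat list, so
# the node stored at position 4 has its parent at (4 - 1) // 2 == 1 and its
# children at 2 * 4 + 1 == 9 and 2 * 4 + 2 == 10.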
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : List[str] ) -> None:
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
def __len__( self : Optional[int] ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def __A ( self : Union[str, Any] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __A ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__lowerCamelCase = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCamelCase , __lowerCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCamelCase , __lowerCamelCase = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Update the weight of the given key
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase = (elem, weight)
if position > 0:
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__lowerCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Swap the nodes at the given positions
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase , __lowerCamelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowerCamelCase = nodea_pos
__lowerCamelCase = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) -> None:
__lowerCamelCase = {}
__lowerCamelCase = 0
def __repr__( self : Optional[int] ) -> str:
return str(self.connections )
def __len__( self : List[str] ) -> int:
return self.nodes
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__lowerCamelCase = {}
self.nodes += 1
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = weight
__lowerCamelCase = weight
def __magic_name__ ( __lowerCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__lowerCamelCase = {node: maxsize for node in graph.connections}
__lowerCamelCase = {node: None for node in graph.connections}
__lowerCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowerCAmelCase , __lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCamelCase = priority_queue.extract_min()
__lowerCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
return dist, parent
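
# Usage sketch (illustrative; the concrete names are obfuscated above): build
# the undirected weighted graph by adding each edge with its weight, run the
# function above, and read the result as (dist, parent); following the
# parent[node] links reconstructs the minimum spanning tree edges chosen by
# Prim's algorithm.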
| 270 | 1 |
import warnings
from .generation import TFGenerationMixin
class lowerCAmelCase__ ( __lowercase ):
# warning at import time
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , __lowercase , )
| 270 |
from __future__ import annotations
from statistics import mean
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i]
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 0
    # While processes remain uncompleted, every process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest process in ready_process (target_process)
    # is executed.
while completed != no_of_processes:
__lowerCamelCase = []
__lowerCamelCase = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__lowerCamelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCamelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCamelCase = 0
__lowerCamelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : list[int] ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
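
# Worked example (matches TEST CASE 01 below): with zero arrival times and
# bursts [2, 5, 3, 7], SRTF completes the jobs in order P1, P3, P2, P4, giving
# waiting times [0, 5, 2, 10] (mean 4.25) and turnaround times [2, 10, 5, 17].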
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 270 | 1 |
import math
import sys
def __magic_name__ ( __lowerCAmelCase : str ) -> str:
__lowerCamelCase = ''''''
try:
with open(__lowerCAmelCase , '''rb''' ) as binary_file:
__lowerCamelCase = binary_file.read()
for dat in data:
__lowerCamelCase = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def __magic_name__ ( __lowerCAmelCase : str ) -> str:
__lowerCamelCase = {'''0''': '''0''', '''1''': '''1'''}
__lowerCamelCase , __lowerCamelCase = '''''', ''''''
__lowerCamelCase = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__lowerCamelCase = lexicon[curr_string]
result += last_match_id
__lowerCamelCase = last_match_id + '''0'''
        if math.log2(__lowerCAmelCase ).is_integer():
__lowerCamelCase = {}
for curr_key in list(__lowerCAmelCase ):
__lowerCamelCase = lexicon.pop(__lowerCAmelCase )
__lowerCamelCase = new_lex
__lowerCamelCase = last_match_id + '''1'''
index += 1
__lowerCamelCase = ''''''
return result
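
# Note on the doubling above (an interpretation, not stated in the source):
# whenever the number of entries reaches a power of two, the compressor would
# have widened its code words by one bit, so every existing key is reindexed
# (the pop-and-reinsert loop) before the next entry is added.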
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> None:
__lowerCamelCase = 8
try:
with open(__lowerCAmelCase , '''wb''' ) as opened_file:
__lowerCamelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def __magic_name__ ( __lowerCAmelCase : str ) -> str:
__lowerCamelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__lowerCamelCase = data_bits[counter:]
__lowerCamelCase = data_bits[counter + 1 :]
return data_bits
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> None:
__lowerCamelCase = read_file_binary(__lowerCAmelCase )
__lowerCamelCase = remove_prefix(__lowerCAmelCase )
__lowerCamelCase = decompress_data(__lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 270 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowercase ):
@staticmethod
@abstractmethod
def __A ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def __A ( self : Optional[int] ) -> Union[str, Any]:
raise NotImplementedError()
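
# Usage pattern (an illustrative sketch; the names below are hypothetical,
# since the real method names are obfuscated above): a concrete CLI command
# subclasses this ABC, wires its sub-parser and arguments in the static
# method, and does the actual work in the instance method, e.g.
#
#   class EnvCommand(BaseCLICommand):            # hypothetical names
#       @staticmethod
#       def register_subcommand(parser):
#           parser.add_parser("env")
#       def run(self):
#           print("environment info")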
| 270 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : int ) -> Union[str, Any]:
__lowerCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''width_multiplier''' ) )
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : int="swish" , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str=10 , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.25 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , ) -> Optional[Any]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = make_divisible(5_12 * width_multiplier , divisor=8 )
__lowerCamelCase = hidden_act
__lowerCamelCase = conv_kernel_size
__lowerCamelCase = output_stride
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = use_labels
__lowerCamelCase = is_training
__lowerCamelCase = num_labels
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
__lowerCamelCase = width_multiplier
__lowerCamelCase = ffn_dropout
__lowerCamelCase = attn_dropout
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : str ) -> int:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
__lowerCamelCase = MobileViTVaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
__lowerCamelCase = self.num_labels
__lowerCamelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
__lowerCamelCase = self.num_labels
__lowerCamelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : List[str] ) -> Dict:
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : Optional[int] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a__ : Any = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a__ : List[str] = False
a__ : Optional[Any] = False
a__ : str = False
a__ : Optional[Any] = False
def __A ( self : List[Any] ) -> Optional[Any]:
__lowerCamelCase = MobileViTVaModelTester(self )
__lowerCamelCase = MobileViTVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __A ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __A ( self : str ) -> str:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __A ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __A ( self : Union[str, Any] ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __A ( self : List[Any] ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : int ) -> Dict:
pass
def __A ( self : Optional[Any] ) -> Any:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[str] ) -> Any:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict ) -> Optional[Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
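            # Worked numbers (using this tester's image_size of 64): the five
            # hidden states have spatial sizes 32, 16, 8, 4 and 2, so after the
            # loop divisor == 64 and output_stride == divisor // 2 == 32.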
__lowerCamelCase = 2
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[str] ) -> Optional[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def __A ( self : int ) -> Tuple:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : Optional[Any] ) -> Any:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __magic_name__ ( ) -> Optional[int]:
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __A ( self : str ) -> Dict:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __A ( self : Optional[int] ) -> Union[str, Any]:
__lowerCamelCase = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Any:
__lowerCamelCase = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowerCamelCase = model.to(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = outputs.logits
# verify the logits
__lowerCamelCase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=SCREAMING_SNAKE_CASE__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
@slow
def __A ( self : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowerCamelCase = model.to(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = outputs.logits.detach().cpu()
__lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ , target_sizes=[(50, 60)] )
__lowerCamelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
| 270 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE__ , )
assert hasattr(self , '''env''' )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=SCREAMING_SNAKE_CASE__ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE__ , py_version='''py36''' , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
# create estimator
__lowerCamelCase = self.create_estimator(SCREAMING_SNAKE_CASE__ )
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE__ )
| 270 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase__ ( __lowercase ):
a__ : List[Any] = """levit"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_24 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[4, 8, 12] , SCREAMING_SNAKE_CASE__ : Tuple=[4, 4, 4] , SCREAMING_SNAKE_CASE__ : Optional[int]=[16, 16, 16] , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE__ : int=0.02 , **SCREAMING_SNAKE_CASE__ : str , ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = kernel_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = hidden_sizes
__lowerCamelCase = num_attention_heads
__lowerCamelCase = depths
__lowerCamelCase = key_dim
__lowerCamelCase = drop_path_rate
__lowerCamelCase = patch_size
__lowerCamelCase = attention_ratio
__lowerCamelCase = mlp_ratio
__lowerCamelCase = initializer_range
__lowerCamelCase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCAmelCase__ ( __lowercase ):
a__ : str = version.parse("""1.11""" )
@property
def __A ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __A ( self : str ) -> float:
return 1e-4
| 270 |
from __future__ import annotations
from fractions import Fraction
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
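# Example (illustrative): 49/98 is digit-cancelling, since 49 != 98, the last
# digit of 49 equals the first digit of 98, and (49 // 10) / (98 % 10) == 4 / 8
# == 0.5 == 49 / 98.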
def __magic_name__ ( __lowerCAmelCase : int ) -> list[str]:
__lowerCamelCase = []
__lowerCamelCase = 11
__lowerCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(__lowerCAmelCase , __lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
__lowerCamelCase = 10
return solutions
def __magic_name__ ( __lowerCAmelCase : int = 2 ) -> int:
__lowerCamelCase = 1.0
for fraction in fraction_list(__lowerCAmelCase ):
__lowerCamelCase = Fraction(__lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 270 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
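#
# Illustration of the variation expansion described above (a minimal sketch,
# separate from the tool's own implementation):
#
#   import itertools
#   dims = [v.split("|") for v in ("--tf32 0|--tf32 1", "|--fp16|--bf16")]
#   combos = [" ".join(c).strip() for c in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#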
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = float("nan")
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
__lowerCamelCase = sys.stdout
__lowerCamelCase = open(SCREAMING_SNAKE_CASE__ , '''a''' )
def __getattr__( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
return getattr(self.stdout , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
self.stdout.write(SCREAMING_SNAKE_CASE__ )
# strip tqdm codes
self.file.write(re.sub(R'''^.*\r''' , '''''' , SCREAMING_SNAKE_CASE__ , 0 , re.M ) )
def __magic_name__ ( __lowerCAmelCase : Optional[Any]=80 , __lowerCAmelCase : Tuple=False ) -> str:
__lowerCamelCase = []
# deal with critical env vars
__lowerCamelCase = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
__lowerCamelCase = os.environ.get(__lowerCAmelCase , __lowerCAmelCase )
if val is not None:
cmd.append(f'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__lowerCamelCase = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(__lowerCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__lowerCamelCase = []
__lowerCamelCase = ''''''
while len(__lowerCAmelCase ) > 0:
current_line += f'''{cmd.pop(0 )} '''
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(__lowerCAmelCase )
__lowerCamelCase = ''''''
return "\\\n".join(__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Union[str, Any]:
# unwrap multi-line input
__lowerCamelCase = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
    __lowerCamelCase = re.sub(R'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += f''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
    __lowerCamelCase = re.sub(R'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
__lowerCamelCase = subprocess.run(__lowerCAmelCase , capture_output=__lowerCAmelCase , text=__lowerCAmelCase )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
__lowerCamelCase = variation.replace(''' ''' , '''-''' )
with open(Path(__lowerCAmelCase ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f:
f.write(result.stdout )
with open(Path(__lowerCAmelCase ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , ) -> Tuple:
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = f'''{id}: {variation:<{longest_variation_len}}'''
__lowerCamelCase = f'''{preamble}: '''
__lowerCamelCase = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(__lowerCAmelCase ) , desc=__lowerCAmelCase , leave=__lowerCAmelCase ):
__lowerCamelCase = process_run_single(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = single_run_metrics[target_metric_key]
if not math.isnan(__lowerCAmelCase ):
metrics.append(__lowerCAmelCase )
results.append(__lowerCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__lowerCamelCase = f'''\33[2K\r{outcome}'''
if len(__lowerCAmelCase ) > 0:
__lowerCamelCase = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__lowerCamelCase = round(mean_metrics[target_metric_key] , 2 )
__lowerCamelCase = f'''{outcome} {mean_target}'''
if len(__lowerCAmelCase ) > 1:
results_str += f''' {tuple(round(x , 2 ) for x in results )}'''
print(__lowerCAmelCase )
__lowerCamelCase = variation
return mean_metrics
else:
print(__lowerCAmelCase )
return {variation_key: variation, target_metric_key: nan}
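# Illustrative console line produced above (numbers are hypothetical): for a variation
# run three times with one failed repeat,
# "2: --tf32 : ✓✓✘ 117.04 (117.3, 116.78)"
# i.e. per-repeat outcomes, the mean target metric, and the individual non-nan results.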
def __magic_name__ ( ) -> Optional[int]:
__lowerCamelCase = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return f'''
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ) -> Optional[int]:
__lowerCamelCase = pd.DataFrame(__lowerCAmelCase )
__lowerCamelCase = '''variation'''
__lowerCamelCase = '''diff_%'''
__lowerCamelCase = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__lowerCamelCase = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(__lowerCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__lowerCamelCase = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(__lowerCAmelCase ):
__lowerCamelCase = df.apply(
lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
__lowerCamelCase = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__lowerCamelCase = df.reindex(__lowerCAmelCase , axis='''columns''' ) # reorder cols
# capitalize
__lowerCamelCase = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
__lowerCamelCase = df.rename(lambda c : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
__lowerCamelCase = df.rename(lambda c : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
__lowerCamelCase = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=__lowerCAmelCase , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=__lowerCAmelCase , floatfmt='''.2f''' )]
print('''\n\n'''.join(__lowerCAmelCase ) )
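# Worked example for the diff column above: with sentinel_value = 100.0 and a row whose
# target metric is 117.3, diff_% = round(100 * (117.3 - 100.0) / 100.0) = 17.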
def __magic_name__ ( ) -> Any:
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=__lowerCAmelCase , type=__lowerCAmelCase , nargs='''+''' , required=__lowerCAmelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=__lowerCAmelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=__lowerCAmelCase , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=__lowerCAmelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=__lowerCAmelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.output_dir
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
__lowerCamelCase = get_base_command(__lowerCAmelCase , __lowerCAmelCase )
# split each dimension into its --foo variations
__lowerCamelCase = [list(map(str.strip , re.split(R'''\|''' , __lowerCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__lowerCamelCase = list(map(str.strip , map(''' '''.join , itertools.product(*__lowerCAmelCase ) ) ) )
__lowerCamelCase = max(len(__lowerCAmelCase ) for x in variations )
# split wanted keys
__lowerCamelCase = args.report_metric_keys.split()
# capture prints into a log file for convenience
__lowerCamelCase = f'''benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt'''
print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(f'''and this script\'s output is also piped into {report_fn}''' )
__lowerCamelCase = Tee(__lowerCAmelCase )
print(f'''\n*** Running {len(__lowerCAmelCase )} benchmarks:''' )
print(f'''Base command: {' '.join(__lowerCAmelCase )}''' )
__lowerCamelCase = '''variation'''
__lowerCamelCase = []
for id, variation in enumerate(tqdm(__lowerCAmelCase , desc='''Total completion: ''' , leave=__lowerCAmelCase ) ):
__lowerCamelCase = base_cmd + variation.split()
results.append(
process_run(
id + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.target_metric_key , __lowerCAmelCase , args.repeat_times , __lowerCAmelCase , args.verbose , ) )
process_results(__lowerCAmelCase , args.target_metric_key , __lowerCAmelCase , args.base_variation , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : str ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
__lowerCamelCase = sorted(string.lower() )
return len(__lowerCAmelCase ) == len(set(__lowerCAmelCase ) )
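# Illustrative checks: "Uncopyrightable" has no repeated letters, so the length
# comparison above holds (an isogram); "allowance" repeats 'a' and 'l', so it does not.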
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = input("Enter a string ").strip()
SCREAMING_SNAKE_CASE__ : Dict = is_isogram(input_str)
print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
| 270 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCAmelCase__ :
a__ : Optional[Union[str, Path]] = None
a__ : bool = False
a__ : bool = False
a__ : bool = False
a__ : Optional[Dict] = None
a__ : Optional[str] = None
a__ : bool = False
a__ : bool = False
a__ : bool = False
a__ : bool = True
a__ : Optional[int] = None
a__ : int = 1
a__ : Optional[Union[str, bool]] = None
a__ : bool = False
a__ : Optional[Dict] = None
a__ : Optional[str] = None
def __A ( self : int ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(SCREAMING_SNAKE_CASE__ ) for k, v in self.__dict__.items()} )
| 270 |
import pprint
import requests
SCREAMING_SNAKE_CASE__ : str = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = random_quotes()
pprint.pprint(response)
| 270 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : str = DiTPipeline
a__ : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
a__ : str = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
a__ : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
a__ : int = False
def __A ( self : Any ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCamelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE__ , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=10_00 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = AutoencoderKL()
__lowerCamelCase = DDIMScheduler()
__lowerCamelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=0 ) -> Tuple:
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCamelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def __A ( self : Tuple ) -> Optional[Any]:
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Any ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Dict ) -> str:
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
__lowerCamelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
__lowerCamelCase = pipe.get_label_ids(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self : Dict ) -> List[Any]:
__lowerCamelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
__lowerCamelCase = ['''vase''', '''umbrella''']
__lowerCamelCase = pipe.get_label_ids(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 270 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
__lowerCamelCase = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
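# Illustrative values (assuming prime_factors returns the factors with multiplicity,
# as its use alongside is_square_free suggests):
# mu(4) = 0 (4 = 2**2 is not square-free)
# mu(6) = 1 (6 = 2 * 3: an even number of prime factors)
# mu(30) = -1 (30 = 2 * 3 * 5: an odd number of prime factors)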
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
SCREAMING_SNAKE_CASE__ : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE__ : Tuple = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE__ : Dict = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
SCREAMING_SNAKE_CASE__ : int = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase__ ( __lowercase ):
a__ : int = VOCAB_FILES_NAMES
a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Optional[Any] = ElectraTokenizer
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : str="[PAD]" , SCREAMING_SNAKE_CASE__ : Dict="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[MASK]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Any=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> List[Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
__lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) )
__lowerCamelCase = do_lower_case
__lowerCamelCase = strip_accents
__lowerCamelCase = tokenize_chinese_chars
__lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = do_lower_case
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Dict:
__lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
__lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
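# e.g. normalize_answer("The  Eiffel Tower!") -> "eiffel tower": lowercased,
# punctuation stripped, the leading article removed, and whitespace collapsed.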
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
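# Worked example for the F1 above: gold "the cat sat" vs prediction "cat sat down".
# After normalization the token bags are ["cat", "sat"] and ["cat", "sat", "down"],
# sharing 2 tokens: precision = 2/3, recall = 2/2, F1 = 2*(2/3)*1 / (2/3 + 1) = 0.8.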
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda k : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(v ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda k : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
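# Illustrative invocation (script and file names are hypothetical):
# python evaluate_v2.py dev-v2.0.json predictions.json \
#     --na-prob-file na_probs.json --out-file eval.json --out-image-dir out_images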
| 270 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
__lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
__lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ) -> List[str]:
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(__lowerCAmelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' , __lowerCAmelCase )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "weight" in name:
__lowerCamelCase = '''weight'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
else:
__lowerCamelCase = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
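# Illustrative trace of the renaming above, for a fine-tuned checkpoint (the fairseq
# key is hypothetical): "w2v_encoder.w2v_model.encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry of MAPPING, the layer index "3" is substituted
# for "*", and the value lands on "sew.encoder.layers.3.attention.k_proj" with
# weight_type "weight".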
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__lowerCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__lowerCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__lowerCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__lowerCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> List[Any]:
__lowerCamelCase = SEWConfig()
if is_finetuned:
__lowerCamelCase = model.wav_encoder.wav_model.cfg
else:
__lowerCamelCase = model.cfg
__lowerCamelCase = fs_config.conv_bias
__lowerCamelCase = eval(fs_config.conv_feature_layers )
__lowerCamelCase = [x[0] for x in conv_layers]
__lowerCamelCase = [x[1] for x in conv_layers]
__lowerCamelCase = [x[2] for x in conv_layers]
__lowerCamelCase = '''gelu'''
__lowerCamelCase = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
__lowerCamelCase = 0.0
__lowerCamelCase = fs_config.activation_fn.name
__lowerCamelCase = fs_config.encoder_embed_dim
__lowerCamelCase = 0.02
__lowerCamelCase = fs_config.encoder_ffn_embed_dim
__lowerCamelCase = 1E-5
__lowerCamelCase = fs_config.encoder_layerdrop
__lowerCamelCase = fs_config.encoder_attention_heads
__lowerCamelCase = fs_config.conv_pos_groups
__lowerCamelCase = fs_config.conv_pos
__lowerCamelCase = len(__lowerCAmelCase )
__lowerCamelCase = fs_config.encoder_layers
__lowerCamelCase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__lowerCamelCase = model.cfg
__lowerCamelCase = fs_config.final_dropout
__lowerCamelCase = fs_config.layerdrop
__lowerCamelCase = fs_config.activation_dropout
__lowerCamelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__lowerCamelCase = fs_config.attention_dropout
__lowerCamelCase = fs_config.dropout_input
__lowerCamelCase = fs_config.dropout
__lowerCamelCase = fs_config.mask_channel_length
__lowerCamelCase = fs_config.mask_channel_prob
__lowerCamelCase = fs_config.mask_length
__lowerCamelCase = fs_config.mask_prob
__lowerCamelCase = '''Wav2Vec2FeatureExtractor'''
__lowerCamelCase = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Tuple=True ) -> int:
if is_finetuned:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__lowerCamelCase = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
__lowerCamelCase = convert_config(model[0] , __lowerCAmelCase )
__lowerCamelCase = model[0].eval()
__lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(__lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
__lowerCamelCase = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__lowerCAmelCase , )
__lowerCamelCase = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
__lowerCamelCase = SEWForCTC(__lowerCAmelCase )
else:
__lowerCamelCase = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
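# Illustrative invocation (paths are hypothetical):
# python convert_sew_checkpoint.py --checkpoint_path ./sew.pt \
#     --pytorch_dump_folder_path ./sew-hf --dict_path ./dict.ltr.txt --is_finetuned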
| 270 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Optional[int]:
__lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__lowerCamelCase = experiments[experiment_id]
__lowerCamelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__lowerCamelCase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data['''extra_args_str'''] )
__lowerCamelCase = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : str ) -> Tuple[int, float]:
__lowerCamelCase = '''--skip_memory_metrics 0'''
__lowerCamelCase = self.run_trainer(
max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
__lowerCamelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
__lowerCamelCase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 | 1 |
import numpy as np
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> List[Any]:
__lowerCamelCase = int(np.ceil((x_end - xa) / h ) )
__lowerCamelCase = np.zeros((n + 1,) )
__lowerCamelCase = ya
__lowerCamelCase = xa
for k in range(__lowerCAmelCase ):
__lowerCamelCase = f(__lowerCAmelCase , y[k] )
__lowerCamelCase = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCamelCase = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCamelCase = f(x + h , y[k] + h * ka )
__lowerCamelCase = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
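# A de-obfuscated sketch of the loop body above, assuming the four collapsed `ka`
# assignments were the classic Runge-Kutta slopes k1..k4 (names are assumptions):
# k1 = f(x, y[k])
# k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
# k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
# k4 = f(x + h, y[k] + h * k3)
# y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)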
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
a__ : Any = """mask2former"""
a__ : Dict = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 20_48 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_55 , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_25_44 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> str:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = backbone_config.pop('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any ) -> Dict[str, any]:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
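# Minimal usage sketch (assuming the public Hugging Face names this entry mirrors;
# `Mask2FormerConfig` stands in for the obfuscated class above):
# from transformers import Mask2FormerConfig, SwinConfig
# backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
# config = Mask2FormerConfig.from_backbone_config(backbone)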
| 270 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__ ( __lowercase ):
a__ : List[Any] = ["""vqvae"""]
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : AutoencoderKL , SCREAMING_SNAKE_CASE__ : UNetaDConditionModel , SCREAMING_SNAKE_CASE__ : Mel , SCREAMING_SNAKE_CASE__ : Union[DDIMScheduler, DDPMScheduler] , ) -> List[str]:
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , mel=SCREAMING_SNAKE_CASE__ , vqvae=SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> int:
return 50 if isinstance(self.scheduler , SCREAMING_SNAKE_CASE__ ) else 10_00
@torch.no_grad()
def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : np.ndarray = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : torch.Generator = None , SCREAMING_SNAKE_CASE__ : float = 0 , SCREAMING_SNAKE_CASE__ : float = 0 , SCREAMING_SNAKE_CASE__ : torch.Generator = None , SCREAMING_SNAKE_CASE__ : float = 0 , SCREAMING_SNAKE_CASE__ : torch.Tensor = None , SCREAMING_SNAKE_CASE__ : torch.Tensor = None , SCREAMING_SNAKE_CASE__ : Any=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
__lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=SCREAMING_SNAKE_CASE__ , device=self.device , )
__lowerCamelCase = noise
__lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.mel.audio_slice_to_image(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__lowerCamelCase = (input_image / 2_55) * 2 - 1
__lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowerCamelCase = self.vqvae.encode(torch.unsqueeze(SCREAMING_SNAKE_CASE__ , 0 ) ).latent_dist.sample(
generator=SCREAMING_SNAKE_CASE__ )[0]
__lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowerCamelCase = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler.timesteps[start_step - 1] )
__lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowerCamelCase = int(mask_start_secs * pixels_per_second )
__lowerCamelCase = int(mask_end_secs * pixels_per_second )
__lowerCamelCase = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['''sample''']
else:
__lowerCamelCase = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['''sample''']
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , )['''prev_sample''']
else:
__lowerCamelCase = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
__lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
__lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
__lowerCamelCase = self.vqvae.decode(SCREAMING_SNAKE_CASE__ )['''sample''']
__lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
__lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(SCREAMING_SNAKE_CASE__ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
__lowerCamelCase = [self.mel.image_to_audio(SCREAMING_SNAKE_CASE__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(SCREAMING_SNAKE_CASE__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(SCREAMING_SNAKE_CASE__ ) )
@torch.no_grad()
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Image.Image] , SCREAMING_SNAKE_CASE__ : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , SCREAMING_SNAKE_CASE__ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__lowerCamelCase = (sample / 2_55) * 2 - 1
__lowerCamelCase = torch.Tensor(SCREAMING_SNAKE_CASE__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowerCamelCase = self.scheduler.alphas_cumprod[t]
__lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowerCamelCase = 1 - alpha_prod_t
__lowerCamelCase = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['''sample''']
__lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
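# A hedged reading of the three update lines above (deterministic DDIM inversion),
# with eps the UNet prediction and a_t = alphas_cumprod[t]:
# x_t = sqrt(a_t) * (x_prev - sqrt(1 - a_prev) * eps) / sqrt(a_prev)
# + sqrt(1 - a_t) * eps
# i.e. the forward diffusion step is re-applied to walk an image back to noise.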
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : float ) -> torch.Tensor:
__lowerCamelCase = acos(torch.dot(torch.flatten(SCREAMING_SNAKE_CASE__ ) , torch.flatten(SCREAMING_SNAKE_CASE__ ) ) / torch.norm(SCREAMING_SNAKE_CASE__ ) / torch.norm(SCREAMING_SNAKE_CASE__ ) )
return sin((1 - alpha) * theta ) * xa / sin(SCREAMING_SNAKE_CASE__ ) + sin(alpha * theta ) * xa / sin(SCREAMING_SNAKE_CASE__ )
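# De-obfuscated, this is spherical linear interpolation slerp(x0, x1, alpha),
# where x0/x1 are assumed names for the two tensor arguments collapsed above:
# theta = acos(<x0, x1> / (|x0| * |x1|))
# result = sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1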
| 270 |
import os
def __magic_name__ ( ) -> str:
__lowerCamelCase = os.path.join(os.path.dirname(__lowerCAmelCase ) , '''num.txt''' )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : int = 50 ) -> int:
__lowerCamelCase = [1] * (length + 1)
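# Recurrence sketch (a reading of the loops below): a row is either all plain
# squares (the base 1 from the initialisation above) or its first tile, of length
# 2-4, starts at some offset with only plain squares before it; the stretch after
# that tile is an independent shorter row already counted in ways_number.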
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
| 270 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str
a__ : List[str]
a__ : Optional[List[str]]
@dataclass
class lowerCAmelCase__ :
a__ : List[int]
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
class lowerCAmelCase__ ( __lowercase ):
a__ : Optional[Any] = """train"""
a__ : Optional[int] = """dev"""
a__ : Dict = """test"""
class lowerCAmelCase__ :
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : List[InputExample] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=-1_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[InputFeatures]:
__lowerCamelCase = {label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}
__lowerCamelCase = []
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE__ ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = []
__lowerCamelCase = []
for word, label in zip(example.words , example.labels ):
__lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE__ ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__lowerCamelCase = tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE__ ) > max_seq_length - special_tokens_count:
__lowerCamelCase = tokens[: (max_seq_length - special_tokens_count)]
__lowerCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCamelCase = [sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCamelCase = [cls_token] + tokens
__lowerCamelCase = [pad_token_label_id] + label_ids
__lowerCamelCase = [cls_token_segment_id] + segment_ids
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCamelCase = [1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE__ )
# Zero-pad up to the sequence length.
__lowerCamelCase = max_seq_length - len(SCREAMING_SNAKE_CASE__ )
if pad_on_left:
__lowerCamelCase = ([pad_token] * padding_length) + input_ids
__lowerCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
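# Worked micro-example (hypothetical labels, max_seq_length=6, right padding):
# tokens: [CLS] fine ##tuning [SEP] <pad> <pad>
# input_mask: 1 1 1 1 0 0
# label_ids: pad B-X pad pad pad pad (pad = pad_token_label_id)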
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase__ ( __lowercase ):
a__ : List[InputFeatures]
a__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> Union[str, Any]:
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
__lowerCamelCase = torch.load(SCREAMING_SNAKE_CASE__ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
def __len__( self : Dict ) -> str:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase__ :
a__ : List[InputFeatures]
a__ : int = -100
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> List[Any]:
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Any:
return len(self.features )
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
| 270 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def __magic_name__ ( __lowerCAmelCase : int ) -> List[Any]:
# initialize config
if "resnet-50" in model_name:
__lowerCamelCase = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
__lowerCamelCase = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
__lowerCamelCase = DetrConfig(use_timm_backbone=__lowerCAmelCase , backbone_config=__lowerCAmelCase )
# set label attributes
__lowerCamelCase = '''panoptic''' in model_name
if is_panoptic:
__lowerCamelCase = 250
else:
__lowerCamelCase = 91
__lowerCamelCase = '''huggingface/label-files'''
__lowerCamelCase = '''coco-detection-id2label.json'''
__lowerCamelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__lowerCamelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __magic_name__ ( __lowerCAmelCase : int ) -> Union[str, Any]:
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ) -> int:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
__lowerCamelCase = ''''''
if is_panoptic:
__lowerCamelCase = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[:256, :]
__lowerCamelCase = in_proj_bias[:256]
__lowerCamelCase = in_proj_weight[256:512, :]
__lowerCamelCase = in_proj_bias[256:512]
__lowerCamelCase = in_proj_weight[-256:, :]
__lowerCamelCase = in_proj_bias[-256:]
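# A note on the slicing above (DETR uses a hidden size of 256): PyTorch's
# MultiheadAttention stores q, k and v stacked in a single (3*256, 256)
# in_proj_weight, so rows [:256] become the query projection, [256:512] the key,
# and the last 256 rows (written [-256:]) the value. The decoder loop below
# repeats this and additionally splits the cross-attention projections.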
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[:256, :]
__lowerCamelCase = in_proj_bias[:256]
__lowerCamelCase = in_proj_weight[256:512, :]
__lowerCamelCase = in_proj_bias[256:512]
__lowerCamelCase = in_proj_weight[-256:, :]
__lowerCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__lowerCamelCase = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__lowerCamelCase = in_proj_weight_cross_attn[:256, :]
__lowerCamelCase = in_proj_bias_cross_attn[:256]
__lowerCamelCase = in_proj_weight_cross_attn[256:512, :]
__lowerCamelCase = in_proj_bias_cross_attn[256:512]
__lowerCamelCase = in_proj_weight_cross_attn[-256:, :]
__lowerCamelCase = in_proj_bias_cross_attn[-256:]
def __magic_name__ ( ) -> List[str]:
__lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]=False ) -> List[str]:
__lowerCamelCase , __lowerCamelCase = get_detr_config(__lowerCAmelCase )
# load original model from torch hub
__lowerCamelCase = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'''Converting model {model_name}...''' )
__lowerCamelCase = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=__lowerCAmelCase ).eval()
__lowerCamelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__lowerCAmelCase ):
if is_panoptic:
__lowerCamelCase = '''detr.''' + src
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase , is_panoptic=__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCamelCase = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
# finally, create HuggingFace model and load state dict
__lowerCamelCase = DetrForSegmentation(__lowerCAmelCase ) if is_panoptic else DetrForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify our conversion on an image
__lowerCamelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowerCamelCase = DetrImageProcessor(format=__lowerCAmelCase )
__lowerCamelCase = processor(images=prepare_img() , return_tensors='''pt''' )
__lowerCamelCase = encoding['''pixel_values''']
__lowerCamelCase = detr(__lowerCAmelCase )
__lowerCamelCase = model(__lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 270 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__lowerCamelCase = Text(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : int ) -> Dict:
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
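# Minimal usage sketch (assuming the public `datasets` API this reader backs;
# the file path is illustrative):
# from datasets import Dataset
# ds = Dataset.from_text("corpus.txt") # internally builds this reader and calls read()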
| 270 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : List[Any]=32 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=64 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : int=5_12 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : str=1 , ) -> Tuple:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = q_groups
__lowerCamelCase = k_groups
__lowerCamelCase = v_groups
__lowerCamelCase = post_attention_groups
__lowerCamelCase = intermediate_groups
__lowerCamelCase = output_groups
def __A ( self : str ) -> Tuple:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Dict ) -> Optional[Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
__lowerCamelCase = SqueezeBertModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
__lowerCamelCase = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
__lowerCamelCase = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
__lowerCamelCase = self.num_choices
__lowerCamelCase = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] ) -> Tuple:
__lowerCamelCase = self.prepare_config_and_inputs()
((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : List[Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
a__ : Any = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : int = False
a__ : Dict = True
a__ : str = False
def __A ( self : Any ) -> Tuple:
__lowerCamelCase = SqueezeBertModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , dim=37 )
def __A ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def __A ( self : Optional[int] ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict ) -> Tuple:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> str:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Tuple ) -> List[str]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Any:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : Dict ) -> Union[str, Any]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : str ) -> Optional[Any]:
__lowerCamelCase = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__lowerCamelCase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )[0]
__lowerCamelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 270 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
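# Pattern note (a reading of the guard above, not new behavior): when torch or a
# recent-enough transformers is unavailable, the dummy objects are imported in
# place of the real pipelines, so the failure surfaces as a clear error at first
# use rather than at import time.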
| 270 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_55 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : List[Any]=30 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_00 , SCREAMING_SNAKE_CASE__ : int=3 , ) -> List[str]:
__lowerCamelCase = parent
__lowerCamelCase = do_resize
__lowerCamelCase = size if size is not None else {'''shortest_edge''': 2_88}
__lowerCamelCase = size_divisor
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = do_center_crop
__lowerCamelCase = image_mean
__lowerCamelCase = image_std
__lowerCamelCase = do_pad
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
def __A ( self : List[str] ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> int:
if not batched:
__lowerCamelCase = self.size['''shortest_edge''']
__lowerCamelCase = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
__lowerCamelCase , __lowerCamelCase = image.size
else:
__lowerCamelCase , __lowerCamelCase = image.shape[1], image.shape[2]
__lowerCamelCase = size / min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if h < w:
__lowerCamelCase , __lowerCamelCase = size, scale * w
else:
__lowerCamelCase , __lowerCamelCase = scale * h, size
__lowerCamelCase = int((13_33 / 8_00) * size )
if max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) > max_size:
__lowerCamelCase = max_size / max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = newh * scale
__lowerCamelCase = neww * scale
__lowerCamelCase , __lowerCamelCase = int(newh + 0.5 ), int(neww + 0.5 )
__lowerCamelCase , __lowerCamelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__lowerCamelCase = []
for image in image_inputs:
__lowerCamelCase , __lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCamelCase = max(SCREAMING_SNAKE_CASE__ , key=lambda item : item[0] )[0]
__lowerCamelCase = max(SCREAMING_SNAKE_CASE__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Union[str, Any] = BridgeTowerImageProcessor if is_vision_available() else None
def __A ( self : Dict ) -> Union[str, Any]:
__lowerCamelCase = BridgeTowerImageProcessingTester(self )
@property
def __A ( self : str ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size_divisor''' ) )
def __A ( self : int ) -> Optional[int]:
pass
def __A ( self : str ) -> str:
# Initialize image processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self : Dict ) -> List[str]:
# Initialize image processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self : Dict ) -> str:
# Initialize image processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 270 |
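The get_expected_values helper in the BridgeTower tester above encodes the processor's resize rule. A standalone sketch of the same arithmetic with the tester defaults (shortest_edge 288, size_divisor 32); the function name is ours.

def expected_size(h, w, size=288, size_divisor=32):
    # Scale so the shorter side equals `size`.
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # Cap the longer side, mirroring the (1333 / 800) * size bound in the tester.
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # Snap both sides down to a multiple of `size_divisor`.
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_size(300, 400))  # (288, 384)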
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __A ( self : Dict ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.BETA , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Optional[Any]:
__lowerCamelCase = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCamelCase = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase = CHRF(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 270 | 1 |
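Calling the metric follows the docstring examples embedded above; the score in the comment is the docstring's value, not a fresh run.

import datasets

chrf = datasets.load_metric("chrf")
predictions = ["The relationship between cats and dogs is not exactly friendly.",
               "a good bookshop is just a genteel black hole that knows how to read."]
references = [["The relationship between dogs and cats is not exactly friendly."],
              ["A good bookshop is just a genteel Black Hole that knows how to read."]]
print(chrf.compute(predictions=predictions, references=references))
# {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
# word_order=2 switches the metric to chrF++.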
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE__ , )
assert hasattr(self , '''env''' )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=SCREAMING_SNAKE_CASE__ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE__ , py_version='''py36''' , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
# create estimator
__lowerCamelCase = self.create_estimator(SCREAMING_SNAKE_CASE__ )
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE__ )
| 270 |
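The assertions at the end of the SageMaker test reduce to filtering the metrics dataframe by metric name. A self-contained sketch; the values are illustrative stand-ins for what TrainingJobAnalytics would return.

import pandas as pd

# Stand-in for TrainingJobAnalytics(job_name).dataframe().
df = pd.DataFrame({
    "metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
    "value": [0.35, 1.1, 0.4],
})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(df[df.metric_name == "eval_loss"]["value"])
# Mirrors the test thresholds: accuracy >= 0.3 and loss <= 1.2 on every eval.
assert all(a >= 0.3 for a in eval_accuracy)
assert all(l <= 1.2 for l in eval_loss)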
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 270 | 1 |
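Like the earlier init files, this one routes everything through _LazyModule so importing the package stays cheap until a symbol is used. A minimal sketch of the user-visible effect, assuming a standard transformers install.

# Importing the package does not yet pull in the torch, TF, or Flax backends.
import transformers.models.resnet as resnet_pkg

# First attribute access triggers the real submodule import.
model_cls = resnet_pkg.ResNetModel
print(model_cls.__name__)  # "ResNetModel"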
from collections import Counter
from timeit import timeit
def __magic_name__ ( input_str : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def __magic_name__ ( input_str : str = "" ) -> bool:
if len(input_str ) == 0:
return True
__lowerCamelCase = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCamelCase = {}
for character in lower_case_input_str:
__lowerCamelCase = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1
__lowerCamelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __magic_name__ ( __lowerCAmelCase : str = "" ) -> None:
print('''\nFor string = ''' , __lowerCAmelCase , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__ : str = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 270 |
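Both predicates above implement the same rule: a string can be rearranged into a palindrome iff at most one character occurs an odd number of times. A worked example of the Counter-based variant:

from collections import Counter

s = "race car".replace(" ", "").lower()  # "racecar"
odd_counts = sum(count % 2 for count in Counter(s).values())
print(odd_counts < 2)  # True: 'e' is the only character with an odd count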
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=18 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Any:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Dict = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Any = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 | 1 |
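The expected_hidden_dim arithmetic repeated through the Pix2Struct tests comes from how the processor flattens images: each row of flattened_patches holds one patch's pixel values plus two extra columns, which we read as patch position indices (the tests themselves only pin down the count). With the tester defaults:

patch_size = {"height": 16, "width": 16}
num_channels = 3
# H * W * C flattened pixel values per patch, plus the 2 extra columns.
expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
print(expected_hidden_dim)  # 770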
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : int ) -> List[Any]:
__lowerCamelCase = logging.get_logger()
# the current default level is logging.WARNING
__lowerCamelCase = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
def __A ( self : str ) -> str:
__lowerCamelCase = logging.get_verbosity()
__lowerCamelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__lowerCamelCase = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def __A ( self : Dict ) -> Any:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__lowerCamelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__lowerCamelCase = os.getenv('''TRANSFORMERS_VERBOSITY''' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = logging.log_levels[env_level_str]
__lowerCamelCase = logging.get_verbosity()
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
__lowerCamelCase = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def __A ( self : Tuple ) -> Tuple:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
__lowerCamelCase = logging.logging.getLogger()
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def __A ( self : Any ) -> Dict:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
__lowerCamelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__lowerCamelCase = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , msg + '''\n''' )
def __magic_name__ ( ) -> Tuple:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 270 |
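A short usage sketch of the verbosity API the tests above exercise:

from transformers.utils import logging

logging.set_verbosity_error()    # silences warnings on all transformers.* loggers
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("hidden at error verbosity")
logging.set_verbosity_warning()  # warnings become visible again
logger.warning("shown at warning verbosity")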
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : int ) -> Optional[int]:
__lowerCamelCase = []
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
self.events.append('''on_init_end''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
self.events.append('''on_train_begin''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
self.events.append('''on_train_end''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.events.append('''on_step_begin''' )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
self.events.append('''on_step_end''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.events.append('''on_evaluate''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
self.events.append('''on_predict''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
self.events.append('''on_save''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
self.events.append('''on_log''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
self.events.append('''on_prediction_step''' )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
| 270 | 1 |
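A real callback follows the same shape as the event recorder above: subclass TrainerCallback and override only the hooks you need. A minimal hypothetical example; the class name is ours.

from transformers import TrainerCallback

class StepPrinterCallback(TrainerCallback):
    """Print the global step whenever a logging step completes."""

    def on_step_end(self, args, state, control, **kwargs):
        if args.logging_steps and state.global_step % args.logging_steps == 0:
            print(f"finished step {state.global_step}")

# Passed to Trainer the same way as MyTestTrainerCallback above:
# trainer = Trainer(..., callbacks=[StepPrinterCallback])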
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str=[] ) -> Any:
__lowerCamelCase = size[0] - overlap_pixels * 2
__lowerCamelCase = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__lowerCamelCase = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
__lowerCamelCase = np.pad(__lowerCAmelCase , mode='''linear_ramp''' , pad_width=__lowerCAmelCase , end_values=0 )
if "l" in remove_borders:
__lowerCamelCase = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCamelCase = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCamelCase = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCamelCase = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ) -> int:
return max(__lowerCAmelCase , min(__lowerCAmelCase , __lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : [int] , __lowerCAmelCase : [int] , __lowerCAmelCase : [int] ) -> Optional[Any]:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __magic_name__ ( __lowerCAmelCase : [int] , __lowerCAmelCase : int , __lowerCAmelCase : [int] ) -> List[str]:
__lowerCamelCase = list(__lowerCAmelCase )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__lowerCamelCase = clamp_rect(__lowerCAmelCase , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ) -> Any:
__lowerCamelCase = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(__lowerCAmelCase , (original_slice, 0) )
return result
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ) -> Union[str, Any]:
__lowerCamelCase = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__lowerCamelCase = tile.crop(__lowerCAmelCase )
return tile
def __magic_name__ ( n : Optional[int] , d : str ) -> Dict:
divisor = n % d
return n - divisor
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : str , SCREAMING_SNAKE_CASE__ : AutoencoderKL , SCREAMING_SNAKE_CASE__ : CLIPTextModel , SCREAMING_SNAKE_CASE__ : CLIPTokenizer , SCREAMING_SNAKE_CASE__ : UNetaDConditionModel , SCREAMING_SNAKE_CASE__ : DDPMScheduler , SCREAMING_SNAKE_CASE__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE__ : int = 3_50 , ) -> Dict:
super().__init__(
vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , unet=SCREAMING_SNAKE_CASE__ , low_res_scheduler=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , max_noise_level=SCREAMING_SNAKE_CASE__ , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
torch.manual_seed(0 )
__lowerCamelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCamelCase = add_overlap_rect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , image.size )
__lowerCamelCase = image.crop(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCamelCase = translated_slice_x - (original_image_slice / 2)
__lowerCamelCase = max(0 , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = squeeze_tile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = to_input.size
__lowerCamelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCamelCase = super(SCREAMING_SNAKE_CASE__ , self ).__call__(image=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).images[0]
__lowerCamelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCamelCase = unsqueeze_tile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCamelCase = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
__lowerCamelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE__ ) , mode='''L''' , )
final_image.paste(
SCREAMING_SNAKE_CASE__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 75 , SCREAMING_SNAKE_CASE__ : float = 9.0 , SCREAMING_SNAKE_CASE__ : int = 50 , SCREAMING_SNAKE_CASE__ : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = 1 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 1_28 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : int = 32 , ) -> Union[str, Any]:
__lowerCamelCase = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCamelCase = math.ceil(image.size[0] / tile_size )
__lowerCamelCase = math.ceil(image.size[1] / tile_size )
__lowerCamelCase = tcx * tcy
__lowerCamelCase = 0
for y in range(SCREAMING_SNAKE_CASE__ ):
for x in range(SCREAMING_SNAKE_CASE__ ):
self._process_tile(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prompt=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , noise_level=SCREAMING_SNAKE_CASE__ , negative_prompt=SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , latents=SCREAMING_SNAKE_CASE__ , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def __magic_name__ ( ) -> Optional[Any]:
# Run a demo
__lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
__lowerCamelCase = StableDiffusionTiledUpscalePipeline.from_pretrained(__lowerCAmelCase , revision='''fp16''' , torch_dtype=torch.floataa )
__lowerCamelCase = pipe.to('''cuda''' )
__lowerCamelCase = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(obj : Union[str, Any] ):
print(f'''progress: {obj['progress']:.4f}''' )
obj["image"].save('''diffusers_library_progress.jpg''' )
__lowerCamelCase = pipe(image=__lowerCAmelCase , prompt='''Black font, white background, vector''' , noise_level=40 , callback=__lowerCAmelCase )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
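# A quick sanity check of the tiling arithmetic above (illustrative numbers):
# with the default tile_size of 128, a 512x512 input gives
# math.ceil(512 / 128) = 4 tile columns and 4 tile rows, i.e. 16 tiles in
# total, and the output canvas is allocated at 4x the input resolution
# (2048x2048), since each tile is upscaled by the x4 model before being
# pasted back.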
| 270 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Dict:
if dst_width <= 0 or dst_height <= 0:
raise ValueError('''Destination width/height should be > 0''' )
__lowerCamelCase = img
__lowerCamelCase = img.shape[1]
__lowerCamelCase = img.shape[0]
__lowerCamelCase = dst_width
__lowerCamelCase = dst_height
__lowerCamelCase = self.src_w / self.dst_w
__lowerCamelCase = self.src_h / self.dst_h
__lowerCamelCase = __lowerCamelCase = (
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 2_55
)
def __A ( self : List[Any] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_x * x )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = 800, 600
SCREAMING_SNAKE_CASE__ : int = imread("image_data/lena.jpg", 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
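# A minimal standalone sketch of the same index mapping, runnable without an
# image file on disk (all values below are illustrative): every destination
# pixel (i, j) is filled from source pixel (int(ratio_y * i), int(ratio_x * j)).
import numpy as np

src = np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)  # tiny 4x4 "image"
dst_h, dst_w = 8, 8
ratio_x, ratio_y = src.shape[1] / dst_w, src.shape[0] / dst_h  # both 0.5 here
dst = np.empty((dst_h, dst_w, 3), dtype=np.uint8)
for i in range(dst_h):
    for j in range(dst_w):
        # Nearest-neighbour: truncate the scaled coordinate to an int index.
        dst[i][j] = src[int(ratio_y * i)][int(ratio_x * j)]
# Destination pixel (5, 5) comes from source pixel (2, 2), since int(0.5 * 5) == 2.
assert (dst[5][5] == src[2][2]).all()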
| 270 | 1 |
from __future__ import annotations
SCREAMING_SNAKE_CASE__ : Optional[Any] = list[tuple[int, int]]
SCREAMING_SNAKE_CASE__ : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],  # 0s mark free cells, 1s mark obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Node | None , ) -> str:
__lowerCamelCase = pos_x
__lowerCamelCase = pos_y
__lowerCamelCase = (pos_y, pos_x)
__lowerCamelCase = goal_x
__lowerCamelCase = goal_y
__lowerCamelCase = g_cost
__lowerCamelCase = parent
__lowerCamelCase = self.calculate_heuristic()
def __A ( self : Optional[int] ) -> float:
__lowerCamelCase = abs(self.pos_x - self.goal_x )
__lowerCamelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> bool:
return self.f_cost < other.f_cost
class lowerCAmelCase__ :
def __init__( self : int , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : tuple[int, int] ) -> str:
__lowerCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = [self.start]
__lowerCamelCase = []
__lowerCamelCase = False
def __A ( self : Dict ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCamelCase = True
return self.retrace_path(SCREAMING_SNAKE_CASE__ )
self.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_successors(SCREAMING_SNAKE_CASE__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
# retrieve the best current path
__lowerCamelCase = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
if not self.reached:
return [self.start.pos]
return None
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Node ) -> list[Node]:
__lowerCamelCase = []
for action in delta:
__lowerCamelCase = parent.pos_x + action[1]
__lowerCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE__ , ) )
return successors
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Node | None ) -> Path:
__lowerCamelCase = node
__lowerCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCamelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = (0, 0)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
SCREAMING_SNAKE_CASE__ : str = GreedyBestFirst(init, goal)
SCREAMING_SNAKE_CASE__ : Dict = greedy_bf.search()
if path:
for pos_x, pos_y in path:
SCREAMING_SNAKE_CASE__ : Tuple = 2
for elem in grid:
print(elem)
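# Worked heuristic example (illustrative): greedy best-first sorts the open
# list purely by the Manhattan distance h = |x - goal_x| + |y - goal_y| and
# ignores the path cost accumulated so far. From (0, 0) with the goal at
# (6, 6), h = 6 + 6 = 12; stepping to (1, 0) or (0, 1) lowers h to 11, so
# those neighbours are expanded first.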
| 270 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( __lowerCAmelCase : dict , __lowerCAmelCase : str , __lowerCAmelCase : set , __lowerCAmelCase : set , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : PriorityQueue , __lowerCAmelCase : dict , __lowerCAmelCase : float | int , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowerCamelCase = cst_fwd.get(__lowerCAmelCase , np.inf )
__lowerCamelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowerCamelCase = new_cost_f
__lowerCamelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowerCamelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict , __lowerCAmelCase : dict ) -> int:
__lowerCamelCase = -1
__lowerCamelCase = set()
__lowerCamelCase = set()
__lowerCamelCase = {source: 0}
__lowerCamelCase = {destination: 0}
__lowerCamelCase = {source: None}
__lowerCamelCase = {destination: None}
__lowerCamelCase = PriorityQueue()
__lowerCamelCase = PriorityQueue()
__lowerCamelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__lowerCamelCase , __lowerCamelCase = queue_forward.get()
visited_forward.add(__lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = queue_backward.get()
visited_backward.add(__lowerCAmelCase )
__lowerCamelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
__lowerCamelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__lowerCamelCase = shortest_distance
return shortest_path_distance
SCREAMING_SNAKE_CASE__ : List[Any] = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
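# A usage sketch (assumed names: the search function and the two graph dicts
# above are obfuscated, so `bidirectional_dij`, `graph_fwd` and `graph_bwd`
# stand in for them purely for illustration):
#
#     bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3
#
# The shortest E -> F path is E -> G -> F with cost 2 + 1 = 3, beating
# E -> B -> C -> D -> F at cost 1 + 1 + 1 + 1 = 4.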
| 270 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Optional[int]:
__lowerCamelCase = {}
__lowerCamelCase = job['''started_at''']
__lowerCamelCase = job['''completed_at''']
__lowerCamelCase = date_parser.parse(__lowerCAmelCase )
__lowerCamelCase = date_parser.parse(__lowerCAmelCase )
__lowerCamelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
__lowerCamelCase = start
__lowerCamelCase = end
__lowerCamelCase = duration_in_min
return job_info
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int]=None ) -> Tuple:
__lowerCamelCase = None
if token is not None:
__lowerCamelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__lowerCamelCase = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__lowerCamelCase = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json()
__lowerCamelCase = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(__lowerCAmelCase ) for job in result['''jobs''']} )
__lowerCamelCase = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__lowerCAmelCase ):
__lowerCamelCase = requests.get(url + f'''&page={i + 2}''' , headers=__lowerCAmelCase ).json()
job_time.update({job['''name''']: extract_time_from_single_job(__lowerCAmelCase ) for job in result['''jobs''']} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[str] = get_job_time(args.workflow_run_id)
SCREAMING_SNAKE_CASE__ : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'{k}: {v["duration"]}')
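# Example invocation (the script name and run id below are hypothetical):
#
#     python get_ci_job_times.py --workflow_run_id 123456789
#
# Note that get_job_time also accepts an optional GitHub token to raise the
# unauthenticated API rate limit, although the CLI as written does not
# expose it as an argument.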
| 270 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (position - 1) // 2
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 1
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 2
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : List[str] ) -> None:
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
def __len__( self : Optional[int] ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def __A ( self : Union[str, Any] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __A ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__lowerCamelCase = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCamelCase , __lowerCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCamelCase , __lowerCamelCase = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Update the weight of the given key
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase = (elem, weight)
if position > 0:
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__lowerCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Swap the nodes at the given positions
__lowerCamelCase = self.heap[node1_pos][0]
__lowerCamelCase = self.heap[node2_pos][0]
__lowerCamelCase , __lowerCamelCase = (
self.heap[node2_pos],
self.heap[node1_pos],
)
__lowerCamelCase = node2_pos
__lowerCamelCase = node1_pos
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) -> None:
__lowerCamelCase = {}
__lowerCamelCase = 0
def __repr__( self : Optional[int] ) -> str:
return str(self.connections )
def __len__( self : List[str] ) -> int:
return self.nodes
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__lowerCamelCase = {}
self.nodes += 1
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = weight
__lowerCamelCase = weight
def __magic_name__ ( __lowerCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__lowerCamelCase = {node: maxsize for node in graph.connections}
__lowerCamelCase = {node: None for node in graph.connections}
__lowerCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowerCAmelCase , __lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCamelCase = priority_queue.extract_min()
__lowerCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
return dist, parent
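# A usage sketch (`prims_algo` is an assumed name for the obfuscated function
# above; GraphUndirectedWeighted, add_edge and MinPriorityQueue are taken
# from the definitions in this snippet):
#
#     graph = GraphUndirectedWeighted()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("a", "c", 15)
#     dist, parent = prims_algo(graph)
#
# The edge (a, c) with weight 15 is skipped in favour of (a, b) and (b, c),
# so one valid MST parent map is {"a": None, "b": "a", "c": "b"}.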
| 270 | 1 |
SCREAMING_SNAKE_CASE__ : List[str] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
SCREAMING_SNAKE_CASE__ : Tuple = ["a", "b", "c", "d", "e"]
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> List[Any]:
__lowerCamelCase = start
# add current to visited
visited.append(__lowerCAmelCase )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = topological_sort("a", [], [])
print(sort)
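# Worked trace (for the module-level `edges` and `vertices` above): starting
# from "a", the DFS recurses into "c" (no neighbours, appended first), then
# into "b", which appends "d" and "e" before "b" itself, and "a" is appended
# last. The printed result is therefore ["c", "d", "e", "b", "a"] -- a
# reverse topological order, in which every vertex appears only after all of
# the vertices it points to.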
| 270 |
from __future__ import annotations
from statistics import mean
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i]
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCamelCase = []
__lowerCamelCase = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__lowerCamelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCamelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCamelCase = 0
__lowerCamelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : list[int] ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
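# Hand-checked arithmetic for test case 01 (all jobs arrive at t = 0, so the
# shortest-remaining-time rule degenerates to shortest-job-first): the jobs
# run in burst order 2, 3, 5, 7 and complete at t = 2, 5, 10, 17. That gives
# waiting times [0, 5, 2, 10] (mean 4.25) and turnaround times [2, 10, 5, 17]
# (mean 8.5) for processes 1-4 respectively, matching the printed output.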
| 270 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : List[str]=16 , SCREAMING_SNAKE_CASE__ : Dict=[32, 64, 1_28] , SCREAMING_SNAKE_CASE__ : Dict=[1, 2, 1] , SCREAMING_SNAKE_CASE__ : int=[2, 2, 4] , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-5 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=10 , SCREAMING_SNAKE_CASE__ : str=8 , SCREAMING_SNAKE_CASE__ : Tuple=["stage1", "stage2"] , SCREAMING_SNAKE_CASE__ : str=[1, 2] , ) -> Any:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = hidden_sizes
__lowerCamelCase = depths
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = patch_norm
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = is_training
__lowerCamelCase = scope
__lowerCamelCase = use_labels
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = encoder_stride
__lowerCamelCase = out_features
__lowerCamelCase = out_indices
def __A ( self : Tuple ) -> Any:
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def __A ( self : Optional[int] ) -> List[str]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
__lowerCamelCase = FocalNetModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
__lowerCamelCase = FocalNetBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__lowerCamelCase = None
__lowerCamelCase = FocalNetBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ) -> Any:
__lowerCamelCase = FocalNetForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : List[Any] ) -> Optional[Any]:
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : Any = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a__ : Any = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
a__ : Tuple = False
a__ : Tuple = False
a__ : Dict = False
a__ : Dict = False
a__ : Dict = False
def __A ( self : List[Any] ) -> List[Any]:
__lowerCamelCase = FocalNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , embed_dim=37 , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return
def __A ( self : int ) -> Optional[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __A ( self : int ) -> List[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> Optional[int]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Tuple ) -> Optional[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def __A ( self : Any ) -> Tuple:
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def __A ( self : str ) -> Any:
pass
def __A ( self : List[Any] ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def __A ( self : List[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# FocalNet has a different seq_length
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = reshaped_hidden_states[0].shape
__lowerCamelCase = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self : str ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> str:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (padded_height, padded_width) )
@slow
def __A ( self : int ) -> int:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = FocalNetModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __A ( self : int ) -> int:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __A ( self : List[Any] ) -> Dict:
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def __A ( self : Tuple ) -> Tuple:
__lowerCamelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowerCamelCase = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.tensor([0.2166, -0.4368, 0.2191] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
a__ : List[str] = FocalNetConfig
a__ : Optional[Any] = False
def __A ( self : Union[str, Any] ) -> Tuple:
__lowerCamelCase = FocalNetModelTester(self )
| 270 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowercase ):
@staticmethod
@abstractmethod
def __A ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def __A ( self : Optional[int] ) -> Union[str, Any]:
raise NotImplementedError()
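# A minimal sketch of a concrete command built on this abstract base (all
# names below are illustrative assumptions; the base class and its two
# abstract methods are obfuscated above, so `register_subcommand` and `run`
# stand in for them):
from argparse import ArgumentParser


class EchoCommand:
    """Toy command: registers its own CLI flags, then executes."""

    def __init__(self, text: str) -> None:
        self._text = text

    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        # Each command attaches its own subparser and arguments.
        sub = parser.add_subparsers().add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self) -> None:
        # Invoked by the CLI entry point after argument parsing.
        print(self._text)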
| 270 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase__ :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=99 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : List[Any]=5 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=16 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Optional[int]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_multiple_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = weight_tying
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def __A ( self : Optional[Any] ) -> str:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def __A ( self : List[Any] ) -> List[Any]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
def __A ( self : List[str] ) -> str:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = True
return config, input_ids, input_mask, token_labels
def __A ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
__lowerCamelCase = GPTNeoXJapaneseModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
__lowerCamelCase = True
__lowerCamelCase = GPTNeoXJapaneseModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
__lowerCamelCase = GPTNeoXJapaneseForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
__lowerCamelCase = True
__lowerCamelCase = GPTNeoXJapaneseForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
# first forward pass
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = output_from_no_past['''hidden_states'''][0]
__lowerCamelCase = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , )['''hidden_states'''][0]
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
def __A ( self : Any ) -> Dict:
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ):
a__ : List[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a__ : Tuple = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a__ : Dict = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a__ : int = False
a__ : Any = False
a__ : Optional[Any] = False
a__ : List[Any] = False
def __A ( self : Any ) -> Optional[int]:
__lowerCamelCase = GPTNeoXJapaneseModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __A ( self : List[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __A ( self : int ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> List[Any]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict ) -> str:
# This regression test was failing with PyTorch < 1.3
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCamelCase = None
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] ) -> str:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : str ) -> List[Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE__ )
@slow
def __A ( self : Tuple ) -> Tuple:
__lowerCamelCase = '''abeja/gpt-neox-japanese-2.7b'''
__lowerCamelCase = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__lowerCamelCase = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__lowerCamelCase = GPTNeoXJapaneseTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = []
for prompt in prompts:
__lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).input_ids
__lowerCamelCase = model.generate(SCREAMING_SNAKE_CASE__ , max_length=50 )
__lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
predicted_outputs += generated_string
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 270 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE__ , )
assert hasattr(self , '''env''' )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=SCREAMING_SNAKE_CASE__ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE__ , py_version='''py36''' , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
# create estimator
__lowerCamelCase = self.create_estimator(SCREAMING_SNAKE_CASE__ )
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE__ )
| 270 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SCREAMING_SNAKE_CASE__ : str = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/vocab.json")
SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures")
class lowerCAmelCase__ ( unittest.TestCase ):
a__ : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def __A ( self : List[Any] ) -> List[Any]:
__lowerCamelCase = 0
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Wav2Vec2Config()
__lowerCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
__lowerCamelCase = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : str ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Wav2Vec2FeatureExtractor()
__lowerCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__lowerCamelCase = Wav2Vec2Processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''r''' ) as f:
__lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : int ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Wav2Vec2FeatureExtractor()
__lowerCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__lowerCamelCase = Wav2Vec2Processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''r''' ) as f:
__lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = Wav2Vec2Config(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' ) as f:
f.write('''{}''' )
__lowerCamelCase = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : str ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
__lowerCamelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
__lowerCamelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__lowerCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __A ( self : Any ) -> Tuple:
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowerCamelCase = CustomTokenizer(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = CustomProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self : Dict ) -> Dict:
class lowerCAmelCase__ ( __lowercase ):
a__ : List[Any] = False
class lowerCAmelCase__ ( __lowercase ):
a__ : List[str] = False
class lowerCAmelCase__ ( __lowercase ):
a__ : Union[str, Any] = """AutoFeatureExtractor"""
a__ : int = """AutoTokenizer"""
a__ : int = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# If remote code is not set, the default is to use local classes.
__lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__lowerCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is enabled, we load from the Hub.
__lowerCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self : Optional[int] ) -> Any:
__lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __A ( self : int ) -> Dict:
__lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
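# Editorial note (added): the two tests above pin down AutoProcessor's fallback behaviour;
# for checkpoints that define no processor, `from_pretrained` is expected to return the
# tokenizer or image processor alone rather than raising. A minimal sketch, assuming Hub access:
#
#     processor = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert')
#     # -> a BertTokenizerFast instance, since this checkpoint ships only a tokenizer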
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
a__ : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def __A ( cls : str ) -> Dict:
__lowerCamelCase = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : List[Any] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __A ( self : List[Any] ) -> Union[str, Any]:
__lowerCamelCase = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE__ , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
__lowerCamelCase = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __A ( self : Union[str, Any] ) -> Dict:
__lowerCamelCase = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE__ , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token , organization='''valid_org''' , )
__lowerCamelCase = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __A ( self : int ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__lowerCamelCase = CustomTokenizer(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = CustomProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
__lowerCamelCase = Repository(SCREAMING_SNAKE_CASE__ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''tokenizer_config.json''' ) ) as f:
__lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
__lowerCamelCase = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 270 |
from __future__ import annotations
from fractions import Fraction
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __magic_name__ ( __lowerCAmelCase : int ) -> list[str]:
__lowerCamelCase = []
__lowerCamelCase = 11
__lowerCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(__lowerCAmelCase , __lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
__lowerCamelCase = 10
return solutions
def __magic_name__ ( __lowerCAmelCase : int = 2 ) -> int:
__lowerCamelCase = 1.0
for fraction in fraction_list(__lowerCAmelCase ):
__lowerCamelCase = Fraction(__lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCAmelCase )
if __name__ == "__main__":
print(solution())
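# Worked example (added for illustration): 49/98 is the classic digit-cancelling fraction:
# naively "cancelling" the shared digit 9 gives 4/8, and indeed 49/98 == 4/8 == 1/2.
# The four two-digit solutions are 16/64, 19/95, 26/65 and 49/98; their product reduces to
# 1/100, so the call above should print 100 (the Project Euler 33 answer).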
| 270 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
class lowerCAmelCase__ ( __lowercase ):
a__ : Union[str, Any] = """llama"""
a__ : List[str] = ["""past_key_values"""]
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_20_00 , SCREAMING_SNAKE_CASE__ : List[str]=40_96 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_10_08 , SCREAMING_SNAKE_CASE__ : Optional[int]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple="silu" , SCREAMING_SNAKE_CASE__ : int=20_48 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=1e-6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> Tuple:
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = intermediate_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__lowerCamelCase = num_attention_heads
__lowerCamelCase = num_key_value_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = initializer_range
__lowerCamelCase = rms_norm_eps
__lowerCamelCase = pretraining_tp
__lowerCamelCase = use_cache
__lowerCamelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any ) -> Optional[int]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f'''got {self.rope_scaling}''' )
__lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
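# Illustrative cases (added; `LlamaConfig` is the upstream name of the class defined above,
# used here for readability):
#
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes validation
#     LlamaConfig(rope_scaling={"type": "static", "factor": 2.0})   # ValueError: unknown type
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})   # ValueError: factor must be > 1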
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
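# Editorial note (added): with the `_LazyModule` indirection above, importing this package
# stays cheap; the torch/TF modeling files are only loaded on first attribute access, e.g.:
#
#     from transformers.models.blip import BlipProcessor   # resolves through the lazy module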
| 270 | 1 |
class lowerCAmelCase__ :
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
__lowerCamelCase = val
__lowerCamelCase = None
__lowerCamelCase = None
def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
if self.val:
if val < self.val:
if self.left is None:
__lowerCamelCase = Node(SCREAMING_SNAKE_CASE__ )
else:
self.left.insert(SCREAMING_SNAKE_CASE__ )
elif val > self.val:
if self.right is None:
__lowerCamelCase = Node(SCREAMING_SNAKE_CASE__ )
else:
self.right.insert(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = val
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : int ) -> List[str]:
# Recursive traversal
if root:
inorder(root.left , __lowerCAmelCase )
res.append(root.val )
inorder(root.right , __lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> List[Any]:
# Build BST
if len(__lowerCAmelCase ) == 0:
return arr
__lowerCamelCase = Node(arr[0] )
for i in range(1 , len(__lowerCAmelCase ) ):
root.insert(arr[i] )
# Traverse BST in order.
__lowerCamelCase = []
inorder(__lowerCAmelCase , __lowerCAmelCase )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
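# Expected output of the call above (an in-order traversal of the constructed BST):
#   [1, 2, 3, 9, 10, 13, 14]
# Note (added): equal keys fall through to the final `else` branch of insert(), which only
# overwrites the stored value, so duplicate inputs are silently dropped by this sort.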
| 270 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
from math import sqrt
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
__lowerCamelCase = 0
for i in range(1 , int(sqrt(__lowerCAmelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(__lowerCAmelCase ):
total += i + n // i
elif i == sqrt(__lowerCAmelCase ):
total += i
return total - n
def __magic_name__ ( __lowerCAmelCase : int = 1_0000 ) -> int:
__lowerCamelCase = sum(
i
for i in range(1 , __lowerCAmelCase )
if sum_of_divisors(sum_of_divisors(__lowerCAmelCase ) ) == i and sum_of_divisors(__lowerCAmelCase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
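# Worked example (added): 220 and 284 form the smallest amicable pair; the proper divisors
# of 220 sum to 284 and those of 284 sum back to 220. The `!= i` guard above excludes
# perfect numbers such as 6 and 28, which satisfy d(d(n)) == n on their own. For the
# default limit of 10000 the sum of all amicable numbers is 31626 (Project Euler 21).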
| 270 |
import pprint
import requests
SCREAMING_SNAKE_CASE__ : str = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = random_quotes()
pprint.pprint(response)
| 270 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Union[str, Any]:
__lowerCamelCase = hf_hub_url(repo_id=__lowerCAmelCase , path=__lowerCAmelCase , revision=__lowerCAmelCase )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(__lowerCAmelCase )}'''
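# Illustrative expansion of the assertion above (using parameter values from the test):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"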
| 270 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
__lowerCamelCase = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
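# Worked examples (added) for the Mobius function implemented above:
#   mu(7)  -> -1  (square-free with a single prime factor: odd count)
#   mu(15) -> +1  (15 = 3 * 5 is square-free with an even number of prime factors)
#   mu(24) ->  0  (24 = 2**3 * 3 is not square-free)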
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
while b:
__lowerCamelCase , __lowerCamelCase = b, a % b
return a
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCAmelCase , a % b )
def __magic_name__ ( ) -> List[str]:
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
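# Worked example (added) for the token-level F1 computed above:
#   gold = "the cat sat"  -> tokens ["cat", "sat"] after normalization ("the" is an article)
#   pred = "cat sat down" -> tokens ["cat", "sat", "down"]
#   num_same = 2, precision = 2/3, recall = 2/2 = 1.0
#   F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8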
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ : List[str] = "RegNetConfig"
# Base docstring
SCREAMING_SNAKE_CASE__ : Tuple = "facebook/regnet-y-040"
SCREAMING_SNAKE_CASE__ : List[Any] = [1, 1_088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ : Optional[Any] = "facebook/regnet-y-040"
SCREAMING_SNAKE_CASE__ : int = "tabby, tabby cat"
SCREAMING_SNAKE_CASE__ : Tuple = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[str] = "relu" , ) -> Tuple:
super().__init__()
__lowerCamelCase = nn.Convad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=kernel_size // 2 , groups=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def __A ( self : int , SCREAMING_SNAKE_CASE__ : int ) -> Any:
__lowerCamelCase = self.convolution(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.normalization(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : int , SCREAMING_SNAKE_CASE__ : RegNetConfig ) -> str:
super().__init__()
__lowerCamelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__lowerCamelCase = config.num_channels
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
__lowerCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
__lowerCamelCase = self.embedder(SCREAMING_SNAKE_CASE__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 ) -> Dict:
super().__init__()
__lowerCamelCase = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
__lowerCamelCase = self.convolution(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.normalization(SCREAMING_SNAKE_CASE__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
super().__init__()
__lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
__lowerCamelCase = nn.Sequential(
nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , nn.Sigmoid() , )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
# b c h w -> b c 1 1
__lowerCamelCase = self.pooler(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.attention(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = hidden_state * attention
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 ) -> Dict:
super().__init__()
__lowerCamelCase = in_channels != out_channels or stride != 1
__lowerCamelCase = max(1 , out_channels // config.groups_width )
__lowerCamelCase = (
RegNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
__lowerCamelCase = nn.Sequential(
RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , )
__lowerCamelCase = ACTaFN[config.hidden_act]
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
__lowerCamelCase = hidden_state
__lowerCamelCase = self.layer(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
__lowerCamelCase = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : int , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 ) -> List[str]:
super().__init__()
__lowerCamelCase = in_channels != out_channels or stride != 1
__lowerCamelCase = max(1 , out_channels // config.groups_width )
__lowerCamelCase = (
RegNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
__lowerCamelCase = nn.Sequential(
RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , RegNetSELayer(SCREAMING_SNAKE_CASE__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , )
__lowerCamelCase = ACTaFN[config.hidden_act]
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
__lowerCamelCase = hidden_state
__lowerCamelCase = self.layer(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
__lowerCamelCase = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : str , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , ) -> int:
super().__init__()
__lowerCamelCase = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
__lowerCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , ) , *[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(depth - 1 )] , )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
__lowerCamelCase = self.layers(SCREAMING_SNAKE_CASE__ )
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : RegNetConfig ) -> int:
super().__init__()
__lowerCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__lowerCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ):
self.stages.append(RegNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ) -> BaseModelOutputWithNoAttention:
__lowerCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCamelCase = hidden_states + (hidden_state,)
__lowerCamelCase = stage_module(SCREAMING_SNAKE_CASE__ )
if output_hidden_states:
__lowerCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase__ ( __lowercase ):
a__ : List[Any] = RegNetConfig
a__ : int = """regnet"""
a__ : Optional[Any] = """pixel_values"""
a__ : List[Any] = True
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
if isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = value
SCREAMING_SNAKE_CASE__ : str = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
SCREAMING_SNAKE_CASE__ : Optional[int] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = config
__lowerCamelCase = RegNetEmbeddings(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegNetEncoder(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
__lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = self.embedder(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = encoder_outputs[0]
__lowerCamelCase = self.pooler(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]:
super().__init__(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = config.num_labels
__lowerCamelCase = RegNetModel(SCREAMING_SNAKE_CASE__ )
# classification head
__lowerCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = self.regnet(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = outputs.pooler_output if return_dict else outputs[1]
__lowerCamelCase = self.classifier(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCamelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCamelCase = '''single_label_classification'''
else:
__lowerCamelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
__lowerCamelCase = MSELoss()
if self.num_labels == 1:
__lowerCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowerCamelCase = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCamelCase = BCEWithLogitsLoss()
__lowerCamelCase = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
__lowerCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
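# Minimal usage sketch (added; the upstream names `RegNetConfig` and
# `RegNetForImageClassification` stand in for the renamed classes above):
#
#     import torch
#     from transformers import RegNetConfig, RegNetForImageClassification
#
#     config = RegNetConfig(num_labels=10)
#     model = RegNetForImageClassification(config)
#     logits = model(torch.randn(1, 3, 224, 224)).logits   # expected shape: (1, 10)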
| 270 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Optional[int]:
__lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called from the same
# program, and it breaks other tests that run from the same pytest worker; therefore, until this is
# sorted out, it must be run only in an external program, i.e. distributed=True in this
# test and only under one or more gpus - if we want cpu, we will need to make a special test.
#
# More specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call, it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test a 2nd time - this was previously producing `eval_loss: nan`
# (to reproduce the problem, set distributed=False)
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
# as each sub-test is slow-ish, the test is split into multiple sub-tests to avoid CI timeout
__lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__lowerCamelCase = experiments[experiment_id]
__lowerCamelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__lowerCamelCase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data['''extra_args_str'''] )
__lowerCamelCase = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : str ) -> Tuple[int, float]:
__lowerCamelCase = '''--skip_memory_metrics 0'''
__lowerCamelCase = self.run_trainer(
max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
__lowerCamelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
__lowerCamelCase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
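        #   (Adam keeps two fp32 moment tensors per parameter: 2 * 4 = 8 bytes,
        #   while 8-bit bnb stores those same states in roughly 2 * 1 = 2 bytes)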
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 270 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
a__ : Any = """mask2former"""
a__ : Dict = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 20_48 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_55 , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_25_44 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> str:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = backbone_config.pop('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any ) -> Dict[str, any]:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 270 | 1 |
from __future__ import annotations
from statistics import mean
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i]
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 0
# When processes are not completed,
    # A process whose arrival time has passed
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCamelCase = []
__lowerCamelCase = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__lowerCamelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCamelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCamelCase = 0
__lowerCamelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : list[int] ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 270 |
import os
def __magic_name__ ( ) -> str:
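    # Sum the numbers listed one per line in num.txt and return the first ten
    # digits of the total (Project Euler problem 13).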
__lowerCamelCase = os.path.join(os.path.dirname(__lowerCAmelCase ) , '''num.txt''' )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str:
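    # e.g. with number=1 and iterations=7 the result is '1 2 Fizz 4 Buzz Fizz 7 '
    # (note the trailing space appended after every entry)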
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
__lowerCamelCase = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str
a__ : List[str]
a__ : Optional[List[str]]
@dataclass
class lowerCAmelCase__ :
a__ : List[int]
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
class lowerCAmelCase__ ( __lowercase ):
a__ : Optional[Any] = """train"""
a__ : Optional[int] = """dev"""
a__ : Dict = """test"""
class lowerCAmelCase__ :
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : List[InputExample] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=-1_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[InputFeatures]:
__lowerCamelCase = {label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}
__lowerCamelCase = []
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE__ ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = []
__lowerCamelCase = []
for word, label in zip(example.words , example.labels ):
__lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE__ ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE__ ) - 1) )
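                    # e.g. with the default pad_token_label_id of -100, the word "jacksonville"
                    # labelled B-LOC becomes tokens ["jack", "##son", "##ville"] with
                    # label_ids [label_map["B-LOC"], -100, -100]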
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__lowerCamelCase = tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE__ ) > max_seq_length - special_tokens_count:
__lowerCamelCase = tokens[: (max_seq_length - special_tokens_count)]
__lowerCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCamelCase = [sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCamelCase = [cls_token] + tokens
__lowerCamelCase = [pad_token_label_id] + label_ids
__lowerCamelCase = [cls_token_segment_id] + segment_ids
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCamelCase = [1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE__ )
# Zero-pad up to the sequence length.
__lowerCamelCase = max_seq_length - len(SCREAMING_SNAKE_CASE__ )
if pad_on_left:
__lowerCamelCase = ([pad_token] * padding_length) + input_ids
__lowerCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase__ ( __lowercase ):
a__ : List[InputFeatures]
a__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> Union[str, Any]:
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
__lowerCamelCase = torch.load(SCREAMING_SNAKE_CASE__ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
def __len__( self : Dict ) -> str:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase__ :
a__ : List[InputFeatures]
a__ : int = -100
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> List[Any]:
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Any:
return len(self.features )
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
| 270 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __magic_name__ ( __lowerCAmelCase : int ) -> Any:
__lowerCamelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] ) -> Any:
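    # Reuse the (vocab_size, emb_size) embedding matrix as the weights of a
    # bias-free linear LM head - the usual weight-tying scheme for BART-style models.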
__lowerCamelCase , __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
__lowerCamelCase = emb.weight.data
return lin_layer
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]="facebook/mbart-large-en-ro" , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Dict=False ) -> List[Any]:
__lowerCamelCase = torch.load(__lowerCAmelCase , map_location='''cpu''' )['''model''']
remove_ignore_keys_(__lowerCAmelCase )
__lowerCamelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowerCamelCase = MBartConfig.from_pretrained(__lowerCAmelCase , vocab_size=__lowerCAmelCase )
if mbart_aa and finetuned:
__lowerCamelCase = '''relu'''
__lowerCamelCase = state_dict['''decoder.embed_tokens.weight''']
__lowerCamelCase = MBartForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase )
if finetuned:
__lowerCamelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
SCREAMING_SNAKE_CASE__ : int = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 270 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__lowerCamelCase = Text(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : int ) -> Dict:
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
| 270 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowerCAmelCase__ :
a__ : int
a__ : int
class lowerCAmelCase__ :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
__lowerCamelCase = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase = size
def __getitem__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def __A ( self : str ) -> Any:
return self._size
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int | None:
__lowerCamelCase = deque([start_vertex] )
__lowerCamelCase = [None] * self.size
__lowerCamelCase = 0
while queue:
__lowerCamelCase = queue.popleft()
__lowerCamelCase = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__lowerCamelCase = current_distance + edge.weight
__lowerCamelCase = distances[edge.destination_vertex]
if (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and new_distance >= dest_vertex_distance
):
continue
__lowerCamelCase = new_distance
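                # 0-1 BFS trick: a zero-weight edge does not increase the distance,
                # so its endpoint goes to the *front* of the deque; this keeps the
                # deque ordered by distance and yields O(V + E) overall instead of
                # Dijkstra's O(E log V).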
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 270 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def __magic_name__ ( __lowerCAmelCase : Union[tf.Tensor, np.ndarray] ) -> List[int]:
if isinstance(__lowerCAmelCase , np.ndarray ):
return list(tensor.shape )
__lowerCamelCase = tf.shape(__lowerCAmelCase )
if tensor.shape == tf.TensorShape(__lowerCAmelCase ):
return dynamic
__lowerCamelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCAmelCase )]
def __magic_name__ ( __lowerCAmelCase : tf.Tensor , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[str] = None ) -> tf.Tensor:
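    # The tiny epsilon leaves the softmax output numerically unchanged but works
    # around an XLA-on-CPU bug in tf.nn.softmax (tensorflow issue #55682).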
return tf.nn.softmax(logits=logits + 1E-9 , axis=__lowerCAmelCase , name=__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=1E-5 , __lowerCAmelCase : Any=-1 ) -> List[Any]:
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
__lowerCamelCase , __lowerCamelCase = tf.nn.moments(__lowerCAmelCase , axes=[axis] , keepdims=__lowerCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__lowerCamelCase = [1] * inputs.shape.rank
__lowerCamelCase = shape_list(__lowerCAmelCase )[axis]
__lowerCamelCase = tf.reshape(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = tf.reshape(__lowerCAmelCase , __lowerCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
__lowerCamelCase = tf.nn.batch_normalization(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , offset=__lowerCAmelCase , scale=__lowerCAmelCase , variance_epsilon=__lowerCAmelCase , )
return outputs
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[str]=-1 ) -> int:
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
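    # e.g. an input of shape (2, 3, 4) with start_dim=1 and end_dim=-1 flattens to (2, 12)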
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__lowerCamelCase = tf.shape(__lowerCAmelCase )
__lowerCamelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__lowerCamelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__lowerCAmelCase , __lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : tf.Tensor ) -> tf.Tensor:
if not isinstance(__lowerCAmelCase , tf.Tensor ):
__lowerCamelCase = tf.convert_to_tensor(__lowerCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__lowerCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__lowerCamelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__lowerCamelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __magic_name__ ( __lowerCAmelCase : tf.Tensor , __lowerCAmelCase : int , __lowerCAmelCase : str = "input_ids" ) -> None:
tf.debugging.assert_less(
__lowerCAmelCase , tf.cast(__lowerCAmelCase , dtype=tensor.dtype ) , message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCAmelCase )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ) -> Any:
__lowerCamelCase = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__lowerCamelCase = [x for x in data if len(__lowerCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
__lowerCamelCase = np.asarray(__lowerCAmelCase )
__lowerCamelCase = 1
__lowerCamelCase = np.array_split(__lowerCAmelCase , __lowerCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__lowerCamelCase = np.array_split(__lowerCAmelCase , __lowerCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__lowerCAmelCase ):
__lowerCamelCase = chunk_data
else:
__lowerCamelCase = data
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ) -> str:
if name in group.attrs:
__lowerCamelCase = [n.decode('''utf8''' ) if hasattr(__lowerCAmelCase , '''decode''' ) else n for n in group.attrs[name]]
else:
__lowerCamelCase = []
__lowerCamelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(__lowerCAmelCase , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __magic_name__ ( __lowerCAmelCase : str ) -> List[str]:
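    # Mirrors the Keras data adapter: every rank-1 tensor of shape (n,) in the
    # nested structure is expanded to (n, 1); everything else passes through unchanged.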
def _expand_single_ad_tensor(__lowerCAmelCase : Dict ):
if isinstance(__lowerCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__lowerCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __lowerCAmelCase )
| 270 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __A ( self : Dict ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.BETA , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Optional[Any]:
__lowerCamelCase = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCamelCase = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase = CHRF(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 270 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str
a__ : List[str]
a__ : Optional[List[str]]
@dataclass
class lowerCAmelCase__ :
a__ : List[int]
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
class lowerCAmelCase__ ( __lowercase ):
a__ : Optional[Any] = """train"""
a__ : Optional[int] = """dev"""
a__ : Dict = """test"""
class lowerCAmelCase__ :
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : List[InputExample] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=-1_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[InputFeatures]:
__lowerCamelCase = {label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}
__lowerCamelCase = []
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE__ ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = []
__lowerCamelCase = []
for word, label in zip(example.words , example.labels ):
__lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE__ ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE__ ) - 1) )
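                    # e.g. with the default pad_token_label_id of -100, the word "jacksonville"
                    # labelled B-LOC becomes tokens ["jack", "##son", "##ville"] with
                    # label_ids [label_map["B-LOC"], -100, -100]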
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__lowerCamelCase = tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE__ ) > max_seq_length - special_tokens_count:
__lowerCamelCase = tokens[: (max_seq_length - special_tokens_count)]
__lowerCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCamelCase = [sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCamelCase = [cls_token] + tokens
__lowerCamelCase = [pad_token_label_id] + label_ids
__lowerCamelCase = [cls_token_segment_id] + segment_ids
__lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCamelCase = [1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE__ )
# Zero-pad up to the sequence length.
__lowerCamelCase = max_seq_length - len(SCREAMING_SNAKE_CASE__ )
if pad_on_left:
__lowerCamelCase = ([pad_token] * padding_length) + input_ids
__lowerCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE__ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(SCREAMING_SNAKE_CASE__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase__ ( __lowercase ):
a__ : List[InputFeatures]
a__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> Union[str, Any]:
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + '''.lock'''
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
__lowerCamelCase = torch.load(SCREAMING_SNAKE_CASE__ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
def __len__( self : Dict ) -> str:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase__ :
a__ : List[InputFeatures]
a__ : int = -100
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TokenClassificationTask , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Split = Split.train , ) -> List[Any]:
__lowerCamelCase = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCamelCase = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCamelCase = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Any:
return len(self.features )
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
| 270 |
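The dataset class above (in obfuscated form) implements a cache-or-build pattern: a file lock ensures only the first process in distributed training featurizes the data, while the others wait and then read the cache. A minimal sketch of that pattern, assuming the caller supplies a `build_features` callable (a hypothetical stand-in for the task-specific `convert_examples_to_features` step):

import os

import torch
from filelock import FileLock


def load_or_build_features(cache_path, build_features, overwrite_cache=False):
    """Load features from `cache_path` if present, otherwise build and cache them.

    `build_features` is a hypothetical zero-argument callable supplied by the caller.
    """
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):  # only one process builds; the rest wait, then read the cache
        if os.path.exists(cache_path) and not overwrite_cache:
            return torch.load(cache_path)
        features = build_features()
        torch.save(features, cache_path)
        return features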
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 270 | 1 |
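The ResNet `__init__` above wires up lazy imports: exported names are declared in an import-structure dict up front, and the defining submodule is only imported when an attribute is first accessed. A self-contained sketch of the same idea built on `importlib`, independent of the internal `_LazyModule` helper:

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal lazy loader: resolves attributes from submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # import_structure maps submodule name -> exported names,
        # e.g. {"configuration_resnet": ["ResNetConfig"], "modeling_resnet": ["ResNetModel"]}
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not triggered again
        return value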
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
SCREAMING_SNAKE_CASE__ : Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> Optional[int]:
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
__lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
__lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(__lowerCAmelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' , __lowerCAmelCase )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = '''weight'''
else:
__lowerCamelCase = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ) -> List[str]:
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__lowerCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__lowerCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__lowerCamelCase = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
__lowerCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=None ) -> Tuple:
    # load the pre-trained checkpoint
__lowerCamelCase = torch.load(__lowerCAmelCase )
__lowerCamelCase = WavLMConfigOrig(checkpoint['''cfg'''] )
__lowerCamelCase = WavLMOrig(__lowerCAmelCase )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
__lowerCamelCase = WavLMConfig.from_pretrained(__lowerCAmelCase )
else:
__lowerCamelCase = WavLMConfig()
__lowerCamelCase = WavLMModel(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase )
hf_wavlm.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 270 |
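The WavLM converter above renames fairseq parameters through a mapping whose target names may contain a `*` placeholder for the layer index. A small sketch of that wildcard substitution, assuming source keys carry the index as a dotted numeric segment such as `encoder.layers.3.self_attn.k_proj.weight`:

import re
from typing import Optional


def remap_key(name: str, mapping: dict) -> Optional[str]:
    """Return the Hugging Face parameter name for a fairseq `name`, or None if unmapped."""
    for src, dst in mapping.items():
        if src in name:
            if "*" in dst:
                match = re.search(r"\.(\d+)\.", name)  # pull out the layer index
                if match is None:
                    return None
                dst = dst.replace("*", match.group(1))
            return dst
    return None


# remap_key("encoder.layers.3.self_attn.k_proj.weight",
#           {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"})
# -> "encoder.layers.3.attention.k_proj"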
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=18 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Any:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Dict = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Any = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 | 1 |
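The Pix2Struct tests above repeatedly compute the width of a flattened patch: patch height times patch width times channel count, plus two slots for the patch's row and column coordinates. A tiny helper making that arithmetic explicit:

def flattened_patch_dim(patch_height, patch_width, num_channels):
    # each flattened patch is [row_idx, col_idx, *pixel_values], hence the +2
    return patch_height * patch_width * num_channels + 2


assert flattened_patch_dim(16, 16, 3) == 770  # the 16x16 RGB case used in the tests above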
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=__lowercase ):
a__ : Optional[int] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=__lowercase ):
a__ : Optional[Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=__lowercase ):
a__ : List[str] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=__lowercase ):
a__ : List[Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : str , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=__lowercase ):
a__ : int = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Dict , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=__lowercase ):
a__ : str = ["""torch""", """transformers""", """onnx"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> str:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __A ( cls : str , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 270 |
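Every dummy class above follows one template: constructors and classmethods immediately delegate to `requires_backends`, which raises when a required backend is absent. A sketch of such a guard; the `available` map here is a hypothetical argument for illustration, whereas the real helper inspects actual import availability:

def requires_backends(obj, backends, available=None):
    """Raise ImportError if any backend in `backends` is missing.

    `available` is a hypothetical {backend_name: bool} map used for illustration.
    """
    available = available or {}
    missing = [b for b in backends if not available.get(b, False)]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")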
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : int ) -> Optional[int]:
__lowerCamelCase = []
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
self.events.append('''on_init_end''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
self.events.append('''on_train_begin''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
self.events.append('''on_train_end''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.events.append('''on_step_begin''' )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
self.events.append('''on_step_end''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.events.append('''on_evaluate''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
self.events.append('''on_predict''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
self.events.append('''on_save''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
self.events.append('''on_log''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
self.events.append('''on_prediction_step''' )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
| 270 | 1 |
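`MyTestTrainerCallback` above records the name of every hook the `Trainer` fires, and the test replays the training loop's control flow to predict that sequence. A minimal standalone version of such a recorder, using only documented `TrainerCallback` hooks:

from transformers import TrainerCallback


class EventRecorder(TrainerCallback):
    """Records the order in which the Trainer fires its callback hooks."""

    def __init__(self):
        self.events = []

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")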
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
set_seed(770)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
SCREAMING_SNAKE_CASE__ : Any = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
SCREAMING_SNAKE_CASE__ : Any = os.path.dirname(os.path.abspath(__file__))
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(os.path.expanduser("~"), ".cache")
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=False ) -> Optional[Any]:
__lowerCamelCase = model_type
if use_small:
key += "_small"
return os.path.join(__lowerCAmelCase , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str] ) -> Union[str, Any]:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
hf_hub_download(repo_id=__lowerCAmelCase , filename=__lowerCAmelCase , local_dir=__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : str="text" ) -> List[str]:
if model_type == "text":
__lowerCamelCase = BarkSemanticModel
__lowerCamelCase = BarkSemanticConfig
__lowerCamelCase = BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowerCamelCase = BarkCoarseModel
__lowerCamelCase = BarkCoarseConfig
__lowerCamelCase = BarkCoarseGenerationConfig
elif model_type == "fine":
__lowerCamelCase = BarkFineModel
__lowerCamelCase = BarkFineConfig
__lowerCamelCase = BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowerCamelCase = f'''{model_type}_small''' if use_small else model_type
__lowerCamelCase = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__lowerCAmelCase ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
__lowerCamelCase = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
# this is a hack
__lowerCamelCase = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
__lowerCamelCase = model_args['''vocab_size''']
__lowerCamelCase = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowerCamelCase = model_args.pop('''n_head''' )
__lowerCamelCase = model_args.pop('''n_embd''' )
__lowerCamelCase = model_args.pop('''n_layer''' )
__lowerCamelCase = ConfigClass(**checkpoint['''model_args'''] )
__lowerCamelCase = ModelClass(config=__lowerCAmelCase )
__lowerCamelCase = GenerationConfigClass()
__lowerCamelCase = model_generation_config
__lowerCamelCase = checkpoint['''model''']
# fixup checkpoint
__lowerCamelCase = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(__lowerCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
__lowerCamelCase = k[len(__lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
__lowerCamelCase = new_k.replace(__lowerCAmelCase , new_layer_name_dict[old_layer_name] )
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowerCamelCase = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
__lowerCamelCase = set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowerCamelCase = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(__lowerCAmelCase ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(__lowerCAmelCase ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
__lowerCamelCase = model.num_parameters(exclude_embeddings=__lowerCAmelCase )
__lowerCamelCase = checkpoint['''best_val_loss'''].item()
logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(__lowerCAmelCase , 3 )} loss''' )
model.eval()
model.to(__lowerCAmelCase )
del checkpoint, state_dict
return model
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : str=False , __lowerCAmelCase : List[Any]="text" ) -> List[str]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowerCamelCase = '''cpu''' # do conversion on cpu
__lowerCamelCase = _get_ckpt_path(__lowerCAmelCase , use_small=__lowerCAmelCase )
__lowerCamelCase = _load_model(__lowerCAmelCase , __lowerCAmelCase , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
# load bark initial model
__lowerCamelCase = _bark_load_model(__lowerCAmelCase , '''cpu''' , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
if model_type == "text":
__lowerCamelCase = bark_model['''model''']
if model.num_parameters(exclude_embeddings=__lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
__lowerCamelCase = 5
__lowerCamelCase = 10
if model_type in ["text", "coarse"]:
__lowerCamelCase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__lowerCamelCase = bark_model(__lowerCAmelCase )[0]
__lowerCamelCase = model(__lowerCAmelCase )
# take last logits
__lowerCamelCase = output_new_model_total.logits[:, [-1], :]
else:
__lowerCamelCase = 3
__lowerCamelCase = 8
__lowerCamelCase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowerCamelCase = model(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = bark_model(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , ) -> str:
__lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = BarkSemanticConfig.from_pretrained(os.path.join(__lowerCAmelCase , '''config.json''' ) )
__lowerCamelCase = BarkCoarseConfig.from_pretrained(os.path.join(__lowerCAmelCase , '''config.json''' ) )
__lowerCamelCase = BarkFineConfig.from_pretrained(os.path.join(__lowerCAmelCase , '''config.json''' ) )
__lowerCamelCase = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
__lowerCamelCase = BarkSemanticModel.from_pretrained(__lowerCAmelCase )
__lowerCamelCase = BarkCoarseModel.from_pretrained(__lowerCAmelCase )
__lowerCamelCase = BarkFineModel.from_pretrained(__lowerCAmelCase )
__lowerCamelCase = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
__lowerCamelCase = BarkConfig.from_sub_model_configs(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__lowerCamelCase = BarkModel(__lowerCAmelCase )
__lowerCamelCase = semantic
__lowerCamelCase = coarseAcoustic
__lowerCamelCase = fineAcoustic
__lowerCamelCase = codec
__lowerCamelCase = bark_generation_config
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
bark.save_pretrained(__lowerCAmelCase , repo_id=__lowerCAmelCase , push_to_hub=__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 270 |
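The Bark converter above fixes up the checkpoint in two steps: it strips the `_orig_mod.` prefix that `torch.compile` leaves on parameter names, then rewrites GPT-style layer names through a substring table. A sketch of that fixup; unlike the script above, this version applies the rename table to every key for simplicity:

def fixup_state_dict(state_dict, rename_table, prefix="_orig_mod."):
    """Strip a compile-time prefix and rewrite layer names via substring replacement."""
    fixed = {}
    for key, value in state_dict.items():
        if key.startswith(prefix):
            key = key[len(prefix):]
        for old, new in rename_table.items():  # e.g. "ln_1" -> "layernorm_1"
            key = key.replace(old, new)
        fixed[key] = value
    return fixed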
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Dict:
        if dst_width <= 0 or dst_height <= 0:
raise ValueError('''Destination width/height should be > 0''' )
__lowerCamelCase = img
__lowerCamelCase = img.shape[1]
__lowerCamelCase = img.shape[0]
__lowerCamelCase = dst_width
__lowerCamelCase = dst_height
__lowerCamelCase = self.src_w / self.dst_w
__lowerCamelCase = self.src_h / self.dst_h
__lowerCamelCase = __lowerCamelCase = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def __A ( self : List[Any] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
__lowerCamelCase = self.img[self.get_y(SCREAMING_SNAKE_CASE__ )][self.get_x(SCREAMING_SNAKE_CASE__ )]
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_x * x )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = 800, 600
SCREAMING_SNAKE_CASE__ : int = imread("image_data/lena.jpg", 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 270 | 1 |
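The scaler above visits every destination pixel in a Python loop; the identical nearest-neighbour mapping (`src_index = int(ratio * dst_index)`) can be done in one vectorized step with NumPy fancy indexing. A sketch:

import numpy as np


def resize_nearest(img, dst_w, dst_h):
    if dst_w <= 0 or dst_h <= 0:
        raise ValueError("Destination width/height should be > 0")
    src_h, src_w = img.shape[:2]
    rows = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    cols = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[rows[:, None], cols[None, :]]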
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=18 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Any:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Dict = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Any = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
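        # num_channels - 1: this test class feeds 4-channel images, which the
        # processor converts to 3-channel RGB before patching.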
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( __lowerCAmelCase : dict , __lowerCAmelCase : str , __lowerCAmelCase : set , __lowerCAmelCase : set , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : PriorityQueue , __lowerCAmelCase : dict , __lowerCAmelCase : float | int , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowerCamelCase = cst_fwd.get(__lowerCAmelCase , np.inf )
__lowerCamelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowerCamelCase = new_cost_f
__lowerCamelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowerCamelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict , __lowerCAmelCase : dict ) -> int:
__lowerCamelCase = -1
__lowerCamelCase = set()
__lowerCamelCase = set()
__lowerCamelCase = {source: 0}
__lowerCamelCase = {destination: 0}
__lowerCamelCase = {source: None}
__lowerCamelCase = {destination: None}
__lowerCamelCase = PriorityQueue()
__lowerCamelCase = PriorityQueue()
__lowerCamelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__lowerCamelCase , __lowerCamelCase = queue_forward.get()
visited_forward.add(__lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = queue_backward.get()
visited_backward.add(__lowerCAmelCase )
__lowerCamelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
__lowerCamelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
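        # Termination: once the cheapest open node on each frontier sums to at
        # least the best meeting-point distance found so far, no shorter path
        # can exist, so the search stops.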
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__lowerCamelCase = shortest_distance
return shortest_path_distance
SCREAMING_SNAKE_CASE__ : List[Any] = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
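# Sanity check (a sketch): with the forward/backward adjacency maps above, the
# bidirectional search from "E" to "F" should return 3 (E -> G -> F), assuming
# the second function above is the search entry point.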
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE__ : Dict = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE__ : int = {
"google/rembert": 256,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = "▁"
class lowerCAmelCase__ ( __lowercase ):
a__ : Dict = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Tuple = RemBertTokenizer
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : int="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="[SEP]" , SCREAMING_SNAKE_CASE__ : List[str]="<pad>" , SCREAMING_SNAKE_CASE__ : str="[CLS]" , SCREAMING_SNAKE_CASE__ : List[Any]="[MASK]" , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = False if not self.vocab_file else True
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
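        # Resulting format: single sequence [CLS] A [SEP]; pair [CLS] A [SEP] B [SEP]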
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) )
return
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 270 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (position - 1) // 2
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 1
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 2
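# The heap lives in a flat list: the node at index i keeps its parent at
# (i - 1) // 2 and its children at 2 * i + 1 and 2 * i + 2.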
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : List[str] ) -> None:
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
def __len__( self : Optional[int] ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def __A ( self : Union[str, Any] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __A ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__lowerCamelCase = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCamelCase , __lowerCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCamelCase , __lowerCamelCase = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Update the weight of the given key
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase = (elem, weight)
if position > 0:
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__lowerCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Swap the nodes at the given positions
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase , __lowerCamelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowerCamelCase = nodea_pos
__lowerCamelCase = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) -> None:
__lowerCamelCase = {}
__lowerCamelCase = 0
def __repr__( self : Optional[int] ) -> str:
return str(self.connections )
def __len__( self : List[str] ) -> int:
return self.nodes
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : T ) -> None:
        # Add a node to the graph if it is not already present
if node not in self.connections:
__lowerCamelCase = {}
self.nodes += 1
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = weight
__lowerCamelCase = weight
def __magic_name__ ( __lowerCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__lowerCamelCase = {node: maxsize for node in graph.connections}
__lowerCamelCase = {node: None for node in graph.connections}
__lowerCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowerCAmelCase , __lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCamelCase = priority_queue.extract_min()
__lowerCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
return dist, parent
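# Note: update_key supplies the decrease-key operation the algorithm relies on,
# so the run above costs O(E log V) with the binary heap; parent maps each node
# to the node that last relaxed it (None for the start node).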
| 270 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __magic_name__ ( __lowerCAmelCase : str ) -> str:
__lowerCamelCase = filter(lambda __lowerCAmelCase : p.requires_grad , model.parameters() )
__lowerCamelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.getLogger(__name__)
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> Tuple:
if metric == "rouge2":
__lowerCamelCase = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
__lowerCamelCase = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
__lowerCamelCase = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
__lowerCamelCase = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'''
''' function.''' )
__lowerCamelCase = ModelCheckpoint(
dirpath=__lowerCAmelCase , filename=__lowerCAmelCase , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
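# Note: the checkpoint always uses mode="max"; when metric is "loss" this keeps
# the highest-loss epoch, unlike the early-stopping helper below, which
# switches to mode="min" for loss-like metrics.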
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> int:
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__lowerCAmelCase , verbose=__lowerCAmelCase , )
class lowerCAmelCase__ ( pl.Callback ):
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
__lowerCamelCase = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : pl.Trainer , SCREAMING_SNAKE_CASE__ : pl.LightningModule , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=True ) -> None:
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
__lowerCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
__lowerCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowerCamelCase = od / '''test_results.txt'''
__lowerCamelCase = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowerCamelCase = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
__lowerCamelCase = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''a+''' ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowerCamelCase = metrics[key]
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
__lowerCamelCase = val.item()
__lowerCamelCase = f'''{key}: {val:.6f}\n'''
writer.write(SCREAMING_SNAKE_CASE__ )
if not save_generations:
return
if "preds" in metrics:
__lowerCamelCase = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
try:
__lowerCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
__lowerCamelCase = pl_module.model.num_parameters()
__lowerCamelCase = count_trainable_parameters(SCREAMING_SNAKE_CASE__ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : pl.Trainer , SCREAMING_SNAKE_CASE__ : pl.LightningModule ) -> List[str]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''test''' )
@rank_zero_only
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : pl.Trainer , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 270 |
from __future__ import annotations
from statistics import mean
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i]
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 0
    # While processes remain incomplete:
    # every process whose arrival time has passed and which still has remaining
    # execution time is placed into ready_process, and the process with the
    # least remaining time (target_process) is executed next.
while completed != no_of_processes:
__lowerCamelCase = []
__lowerCamelCase = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__lowerCamelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCamelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCamelCase = 0
__lowerCamelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
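# Turnaround time for each process is its burst time plus its waiting time.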
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : list[int] ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 270 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__ : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowercase ):
@staticmethod
@abstractmethod
def __A ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def __A ( self : Optional[int] ) -> Union[str, Any]:
raise NotImplementedError()
| 270 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 270 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE__ , )
assert hasattr(self , '''env''' )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
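        # smdistributed.modelparallel splits the model into 4 pipeline-parallel
        # partitions with 4 microbatches, runs 8 processes per host, and layers
        # DDP data parallelism on top.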
__lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=SCREAMING_SNAKE_CASE__ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE__ , py_version='''py36''' , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
# create estimator
__lowerCamelCase = self.create_estimator(SCREAMING_SNAKE_CASE__ )
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE__ )
| 270 | 1 |
import functools
from typing import Any
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : list[str] ) -> bool:
# Validation
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0:
        raise ValueError('''the string should be a non-empty string''' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) > 0 for item in words ):
raise ValueError('''the words should be a list of non-empty strings''' )
# Build trie
__lowerCamelCase = {}
__lowerCamelCase = '''WORD_KEEPER'''
for word in words:
__lowerCamelCase = trie
for c in word:
if c not in trie_node:
__lowerCamelCase = {}
__lowerCamelCase = trie_node[c]
__lowerCamelCase = True
__lowerCamelCase = len(__lowerCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(__lowerCAmelCase : int ) -> bool:
if index == len_string:
return True
__lowerCamelCase = trie
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase = trie_node.get(string[i] , __lowerCAmelCase )
if trie_node is None:
return False
if trie_node.get(__lowerCAmelCase , __lowerCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
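# Usage note (a sketch): with words = ["leet", "code"], the string "leetcode"
# should be breakable (True); the cached helper keeps the scan at O(n^2).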
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
from __future__ import annotations
from fractions import Fraction
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
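# Example: 49/98 is digit cancelling, since removing the shared 9 leaves 4/8,
# which equals the original fraction.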
def __magic_name__ ( __lowerCAmelCase : int ) -> list[str]:
__lowerCamelCase = []
__lowerCamelCase = 11
__lowerCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(__lowerCAmelCase , __lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
__lowerCamelCase = 10
return solutions
def __magic_name__ ( __lowerCAmelCase : int = 2 ) -> int:
__lowerCamelCase = 1.0
for fraction in fraction_list(__lowerCAmelCase ):
__lowerCamelCase = Fraction(__lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCAmelCase )
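# For the default two-digit case this multiplies out the four non-trivial
# digit-cancelling fractions; the denominator of their product in lowest terms
# is 100 (Project Euler 33).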
if __name__ == "__main__":
print(solution())
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int = 0 ) -> list:
__lowerCamelCase = length or len(__lowerCAmelCase )
__lowerCamelCase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__lowerCamelCase , __lowerCamelCase = list_data[i + 1], list_data[i]
__lowerCamelCase = True
return list_data if not swapped else bubble_sort(__lowerCAmelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __A ( self : Dict ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.BETA , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Optional[Any]:
__lowerCamelCase = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCamelCase = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
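        # sacrebleu expects references transposed: one list per reference slot,
        # each holding that slot's reference for every prediction.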
__lowerCamelCase = CHRF(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 270 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : list ) -> list:
__lowerCamelCase = len(__lowerCAmelCase )
for i in range(1 , __lowerCAmelCase ):
__lowerCamelCase = collection[i]
__lowerCamelCase = 0
__lowerCamelCase = i - 1
while low <= high:
__lowerCamelCase = (low + high) // 2
if val < collection[mid]:
__lowerCamelCase = mid - 1
else:
__lowerCamelCase = mid + 1
for j in range(__lowerCAmelCase , __lowerCAmelCase , -1 ):
__lowerCamelCase = collection[j - 1]
__lowerCamelCase = val
return collection
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 270 |
import pprint
import requests
SCREAMING_SNAKE_CASE__ : str = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = random_quotes()
pprint.pprint(response)
| 270 | 1 |
def __magic_name__ ( __lowerCAmelCase : int = 6008_5147_5143 ) -> int:
try:
__lowerCamelCase = int(__lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
__lowerCamelCase = 2
__lowerCamelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCamelCase = i
while n % i == 0:
__lowerCamelCase = n // i
i += 1
return int(__lowerCAmelCase )
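# Trial division strips each prime factor of n in increasing order, so the last
# factor recorded is the largest; for the default input 600851475143 the
# result is 6857 (Project Euler 3).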
if __name__ == "__main__":
print(F'{solution() = }')
| 270 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
__lowerCamelCase = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
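# Mobius function mu(n): +1 if n is square-free with an even number of prime
# factors, -1 if square-free with an odd number, and 0 if a squared prime
# divides n.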
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : int ) -> int:
__lowerCamelCase = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__lowerCamelCase = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
__lowerCamelCase = '''The dog is cute and lives in the garden house'''
__lowerCamelCase = jnp.array([tokenizer.encode(SCREAMING_SNAKE_CASE__ )] )
__lowerCamelCase = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
__lowerCamelCase = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
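# F1 above is the harmonic mean of token-level precision and recall over the
# normalized answer tokens; the exact-match scorer above is a strict 0/1
# string comparison.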
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
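    # Sweep thresholds in ascending no-answer probability: start from abstaining
    # on every question, add each question's score change from answering it, and
    # keep the best running total (and its threshold).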
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
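# Agg is a non-interactive backend, so the figures can be rendered and saved
# to files on headless machines (no display required).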
import matplotlib.pyplot as plt
main()
| 270 | 1 |
from random import randint, random
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : int = 5 , ) -> list:
__lowerCamelCase = [[-1] * number_of_cells] # Create a highway without any cars
__lowerCamelCase = 0
__lowerCamelCase = max(__lowerCAmelCase , 0 )
while i < number_of_cells:
__lowerCamelCase = (
randint(0 , __lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
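# Hand-checked example (not from the original file): with deterministic speed and
# spacing, construct_highway(10, 2, 6) places a car of speed 6 every 2 cells:
# [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]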
def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int ) -> int:
__lowerCamelCase = 0
__lowerCamelCase = highway_now[car_index + 1 :]
for cell in range(len(__lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# If we get here, the car is near the end of the highway, so wrap around
return distance + get_distance(__lowerCAmelCase , -1 )
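# Worked examples (hand-checked): get_distance([6, -1, 6, -1, 6], 2) scans the
# cells after index 2 ([-1, 6]), counts one empty cell, and returns 1. For the
# last car the recursion wraps around: get_distance([6, -1, -1], 0) counts the two
# trailing empty cells, then adds get_distance(highway, -1), which hits the
# leading car immediately (0), for a total of 2.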
def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : float , __lowerCAmelCase : int ) -> list:
__lowerCamelCase = len(__lowerCAmelCase )
# Before calculations, the highway is empty
__lowerCamelCase = [-1] * number_of_cells
for car_index in range(__lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__lowerCamelCase = min(highway_now[car_index] + 1 , __lowerCAmelCase )
# Number of empty cells before the next car
__lowerCamelCase = get_distance(__lowerCAmelCase , __lowerCAmelCase ) - 1
# We can't have the car causing an accident
__lowerCamelCase = min(next_highway[car_index] , __lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
__lowerCamelCase = max(next_highway[car_index] - 1 , 0 )
return next_highway
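# Hand-checked example (toy values): update([2, -1, -1, -1, -1, 1, -1, -1], 0.0, 5)
# returns [3, -1, -1, -1, -1, 1, -1, -1]. The first car accelerates to 3 (a gap of
# 4 empty cells minus the one-cell safety buffer allows it); the second car wraps
# around, finds only 2 empty cells before the first car, and is capped at speed 1.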
def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : int ) -> list:
__lowerCamelCase = len(highway[0] )
for i in range(__lowerCAmelCase ):
__lowerCamelCase = update(highway[i] , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = [-1] * number_of_cells
for car_index in range(__lowerCAmelCase ):
__lowerCamelCase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__lowerCamelCase = (car_index + speed) % number_of_cells
# Commit the change of position
__lowerCamelCase = speed
highway.append(__lowerCAmelCase )
return highway
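# Minimal usage sketch (arguments are positional; the values here are assumed for
# illustration): build a 20-cell ring with a car every 4 cells at speed 2, then run
# 5 update steps with a 30% random-slowdown probability and a speed cap of 5.
#
#     history = simulate(construct_highway(20, 4, 2), 5, 0.3, 5)
#     for row in history:
#         print(row)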
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Optional[int]:
__lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program, and it breaks other tests that run from the same pytest worker; therefore, until this
# is sorted out, it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu we will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting "eval_loss": nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
# as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
__lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__lowerCamelCase = experiments[experiment_id]
__lowerCamelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__lowerCamelCase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data['''extra_args_str'''] )
__lowerCamelCase = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {os.path.basename(SCREAMING_SNAKE_CASE__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : str ) -> Tuple[int, float]:
__lowerCamelCase = '''--skip_memory_metrics 0'''
__lowerCamelCase = self.run_trainer(
max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
__lowerCamelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
__lowerCamelCase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
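# Worked check of the figures quoted above: Adam keeps two fp32 moment tensors per
# parameter, i.e. 2 * 4 = 8 bytes/param -> 25M * 8B ~= 200MB, while the 8-bit
# optimizer states take 25M * 2B ~= 50MB, so the expected saving is ~150MB; the
# 120MB threshold below leaves ~30MB of headroom for per-GPU variance.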
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
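# torch.distributed.run spawns n_gpus_to_use worker processes for the script; the
# unique master port keeps concurrent test workers from colliding on the
# rendezvous address.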
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 | 1 |