code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Tuple ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __UpperCAmelCase =[[1, 2, 4], [1, 2, 3, 4]] __UpperCAmelCase =DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _a ( self : Union[str, Any] ) -> Optional[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __UpperCAmelCase =[[1, 2], [1, 2, 3, 4]] with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here def _a ( self : int ) -> Tuple: __UpperCAmelCase =[[1, 2, 3], [1, 2, 4]] __UpperCAmelCase =DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(1 ) __UpperCAmelCase =stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(2 ) __UpperCAmelCase =stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(3 ) __UpperCAmelCase =stepped is True and completed is True and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _a ( self : Optional[Any] ) -> Optional[int]: __UpperCAmelCase =[[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __UpperCAmelCase =DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
68
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = 'ctrl' lowerCamelCase : Any = ['past_key_values'] lowerCamelCase : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any: __UpperCAmelCase =vocab_size __UpperCAmelCase =n_positions __UpperCAmelCase =n_embd __UpperCAmelCase =n_layer __UpperCAmelCase =n_head __UpperCAmelCase =dff __UpperCAmelCase =resid_pdrop __UpperCAmelCase =embd_pdrop __UpperCAmelCase =layer_norm_epsilon __UpperCAmelCase =initializer_range __UpperCAmelCase =use_cache super().__init__(**__SCREAMING_SNAKE_CASE )
68
1
import os def lowercase__ ( ) -> Optional[Any]: """simple docstring""" with open(os.path.dirname(A_ ) + """/p022_names.txt""" ) as file: __UpperCAmelCase =str(file.readlines()[0] ) __UpperCAmelCase =names.replace("""\"""" , """""" ).split(""",""" ) names.sort() __UpperCAmelCase =0 __UpperCAmelCase =0 for i, name in enumerate(A_ ): for letter in name: name_score += ord(A_ ) - 64 total_score += (i + 1) * name_score __UpperCAmelCase =0 return total_score if __name__ == "__main__": print(solution())
68
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __UpperCAmelCase =k.replace(A_ , A_ ) if k.startswith("""encoder""" ): __UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" ) __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" ) return k def lowercase__ ( A_: Tuple ) -> str: """simple docstring""" __UpperCAmelCase =[ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __UpperCAmelCase =sd.pop(A_ ) __UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __UpperCAmelCase =v __A = ["START"] @torch.no_grad() def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =torch.load(A_ , map_location="""cpu""" ) __UpperCAmelCase =model["""model"""] __UpperCAmelCase =BlenderbotConfig.from_json_file(A_ ) __UpperCAmelCase =BlenderbotForConditionalGeneration(A_ ) __UpperCAmelCase =m.model.state_dict().keys() __UpperCAmelCase =[] __UpperCAmelCase ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue __UpperCAmelCase =rename_state_dict_key(A_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __UpperCAmelCase =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(A_ ) m.model.load_state_dict(A_ , strict=A_ ) m.half() m.save_pretrained(A_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __A = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
68
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase =ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=A_ ) __UpperCAmelCase =parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=A_ ) env_command_parser(subparsers=A_ ) launch_command_parser(subparsers=A_ ) tpu_command_parser(subparsers=A_ ) test_command_parser(subparsers=A_ ) # Let's go __UpperCAmelCase =parser.parse_args() if not hasattr(A_ , """func""" ): parser.print_help() exit(1 ) # Run args.func(A_ ) if __name__ == "__main__": main()
68
from itertools import permutations def lowercase__ ( A_: tuple ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __UpperCAmelCase =[7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase__ ( A_: int = 10 ) -> int: """simple docstring""" return sum( int("""""".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
68
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __A = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure)
68
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar("T") def lowercase__ ( A_: int ) -> int: """simple docstring""" return (position - 1) // 2 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 1 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 2 class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[str] ) -> None: __UpperCAmelCase =[] __UpperCAmelCase ={} __UpperCAmelCase =0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Dict ) -> str: return str(self.heap ) def _a ( self : Optional[int] ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __UpperCAmelCase =self.elements self.elements += 1 self._bubble_up(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __UpperCAmelCase , __UpperCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __UpperCAmelCase , __UpperCAmelCase =self.heap[0] self._bubble_down(__SCREAMING_SNAKE_CASE ) return elem def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Update the weight of the given key __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase =(elem, weight) if position > 0: __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __UpperCAmelCase =self.position_map[elem] if curr_pos == 0: return None __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_up(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements and child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) else: return None if child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: # Swap the nodes at the given positions __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase , __UpperCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) __UpperCAmelCase =nodea_pos __UpperCAmelCase =nodea_pos class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[Any] ) -> None: __UpperCAmelCase ={} __UpperCAmelCase =0 def __repr__( self : Tuple ) -> str: return str(self.connections ) def __len__( self : str ) -> int: return self.nodes def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __UpperCAmelCase ={} self.nodes += 1 def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__SCREAMING_SNAKE_CASE ) self.add_node(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =weight __UpperCAmelCase =weight def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: """simple docstring""" __UpperCAmelCase ={node: maxsize for node in graph.connections} __UpperCAmelCase ={node: None for node in graph.connections} __UpperCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(A_ , A_ ) if priority_queue.is_empty(): return dist, parent # initialization __UpperCAmelCase =priority_queue.extract_min() __UpperCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): __UpperCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node return dist, parent
68
1
from __future__ import annotations import pandas as pd def lowercase__ ( A_: list[int] , A_: list[int] , A_: int ) -> list[int]: """simple docstring""" __UpperCAmelCase =[0] * no_of_processes __UpperCAmelCase =[0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(A_ ): __UpperCAmelCase =burst_time[i] __UpperCAmelCase =0 __UpperCAmelCase =0 __UpperCAmelCase =999999999 __UpperCAmelCase =0 __UpperCAmelCase =False # Process until all processes are completed while complete != no_of_processes: for j in range(A_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: __UpperCAmelCase =remaining_time[j] __UpperCAmelCase =j __UpperCAmelCase =True if not check: increment_time += 1 continue remaining_time[short] -= 1 __UpperCAmelCase =remaining_time[short] if minm == 0: __UpperCAmelCase =999999999 if remaining_time[short] == 0: complete += 1 __UpperCAmelCase =False # Find finish time of current process __UpperCAmelCase =increment_time + 1 # Calculate waiting time __UpperCAmelCase =finish_time - arrival_time[short] __UpperCAmelCase =finar - burst_time[short] if waiting_time[short] < 0: __UpperCAmelCase =0 # Increment time increment_time += 1 return waiting_time def lowercase__ ( A_: list[int] , A_: int , A_: list[int] ) -> list[int]: """simple docstring""" __UpperCAmelCase =[0] * no_of_processes for i in range(A_ ): __UpperCAmelCase =burst_time[i] + waiting_time[i] return turn_around_time def lowercase__ ( A_: list[int] , A_: list[int] , A_: int ) -> None: """simple docstring""" __UpperCAmelCase =0 __UpperCAmelCase =0 for i in range(A_ ): __UpperCAmelCase =total_waiting_time + waiting_time[i] __UpperCAmelCase =total_turn_around_time + turn_around_time[i] print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' ) print("""Average turn around time =""" , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("Enter how many process you want to analyze") __A = int(input()) __A = [0] * no_of_processes __A = [0] * no_of_processes __A = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("Enter the arrival time and burst time for process:--" + str(i + 1)) __A , __A = map(int, input().split()) __A = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __A = burst_time __A = no_of_processes __A = waiting_time __A = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) __A = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ "Process", "BurstTime", "ArrivalTime", "WaitingTime", "TurnAroundTime", ], ) # Printing the dataFrame pd.set_option("display.max_rows", fcfs.shape[0] + 1) print(fcfs)
68
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A = logging.get_logger(__name__) @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase =deprecated_arg[3:] __UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCamelCase : str = field( default=UpperCamelCase , metadata={'help': 'Name of TPU'} , ) lowerCamelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} ) lowerCamelCase : bool = field( default=UpperCamelCase , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase =None if self.tpu: try: if self.tpu_name: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase =None return tpu @cached_property def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self : str ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self : Dict ) -> Optional[int]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self : List[str] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[str] ) -> bool: return self.n_gpu > 0
68
1
from __future__ import annotations def lowercase__ ( A_: list[int] ) -> bool: """simple docstring""" return len(set(A_ ) ) == len(A_ ) if __name__ == "__main__": import doctest doctest.testmod()
68
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Dict: torch.manual_seed(0 ) __UpperCAmelCase =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _a ( self : int ) -> Union[str, Any]: __UpperCAmelCase =self.dummy_uncond_unet __UpperCAmelCase =ScoreSdeVeScheduler() __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[ 0 ] __UpperCAmelCase =image[0, -3:, -3:, -1] __UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ) -> int: __UpperCAmelCase ="""google/ncsnpp-church-256""" __UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
68
1
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A = logging.get_logger(__name__) __A = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __A = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } __A = { "facebook/blenderbot_small-90M": 5_12, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = VOCAB_FILES_NAMES lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Union[str, Any] = BlenderbotSmallTokenizer def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Any]="<|endoftext|>" , __SCREAMING_SNAKE_CASE : str="<|endoftext|>" , __SCREAMING_SNAKE_CASE : Any="<|endoftext|>" , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple: super().__init__( ByteLevelBPETokenizer( vocab=__SCREAMING_SNAKE_CASE , merges=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , ) , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =add_prefix_space def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Any: __UpperCAmelCase =[self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase =[self.sep_token_id] __UpperCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
68
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase =self.model.config else: __UpperCAmelCase =config __UpperCAmelCase =data_args __UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase =label_smoothed_nll_loss def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any: if self.optimizer is None: __UpperCAmelCase =["""bias""", """LayerNorm.weight"""] __UpperCAmelCase =[ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase =Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase =Adafactor __UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase =AdamW __UpperCAmelCase ={ """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase =self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase =OSS( params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: __UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase =schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase =schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) return scheduler def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 ) __UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: __UpperCAmelCase =inputs.pop("""labels""" ) __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return loss def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: __UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase =self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) __UpperCAmelCase =inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: # If PAD token is not defined at least EOS token has to be defined __UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase =pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase =tensor return padded_tensor
68
1
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser __A = re.compile(r"\s+") def lowercase__ ( A_: int ) -> Any: """simple docstring""" return {"hash": hashlib.mda(re.sub(A_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def lowercase__ ( A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =[len(A_ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(A_ ), "line_max": max(A_ )} def lowercase__ ( A_: Any ) -> int: """simple docstring""" __UpperCAmelCase =np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def lowercase__ ( A_: List[Any] , A_: Tuple ) -> str: """simple docstring""" if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def lowercase__ ( A_: List[str] , A_: Dict=5 ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase =["""auto-generated""", """autogenerated""", """automatically generated"""] __UpperCAmelCase =example["""content"""].splitlines() for _, line in zip(range(A_ ) , A_ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def lowercase__ ( A_: str , A_: List[Any]=5 , A_: List[Any]=0.0_5 ) -> List[str]: """simple docstring""" __UpperCAmelCase =["""unit tests""", """test file""", """configuration file"""] __UpperCAmelCase =example["""content"""].splitlines() __UpperCAmelCase =0 __UpperCAmelCase =0 # first test for _, line in zip(range(A_ ) , A_ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test __UpperCAmelCase =example["""content"""].count("""\n""" ) __UpperCAmelCase =int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def lowercase__ ( A_: Any ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =["""def """, """class """, """for """, """while """] __UpperCAmelCase =example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def lowercase__ ( A_: Optional[int] , A_: List[Any]=4 ) -> Any: """simple docstring""" __UpperCAmelCase =example["""content"""].splitlines() __UpperCAmelCase =0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def lowercase__ ( A_: List[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =tokenizer(example["""content"""] , truncation=A_ )["""input_ids"""] __UpperCAmelCase =len(example["""content"""] ) / len(A_ ) return {"ratio": ratio} def lowercase__ ( A_: int ) -> str: """simple docstring""" __UpperCAmelCase ={} results.update(get_hash(A_ ) ) results.update(line_stats(A_ ) ) results.update(alpha_stats(A_ ) ) results.update(char_token_ratio(A_ ) ) results.update(is_autogenerated(A_ ) ) results.update(is_config_or_test(A_ ) ) results.update(has_no_keywords(A_ ) ) results.update(has_few_assignments(A_ ) ) return results def lowercase__ ( A_: Dict , A_: Any , A_: List[str] ) -> str: """simple docstring""" if not check_uniques(A_ , A_ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def lowercase__ ( A_: List[Any] ) -> Tuple: """simple docstring""" with open(A_ , """rb""" ) as f_in: with gzip.open(str(A_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(A_ , A_ ) os.unlink(A_ ) # Settings __A = HfArgumentParser(PreprocessingArguments) __A = parser.parse_args() if args.num_workers is None: __A = multiprocessing.cpu_count() __A = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset __A = time.time() __A = load_dataset(args.dataset_name, split="train") print(F"""Time to load dataset: {time.time()-t_start:.2f}""") # Run preprocessing __A = time.time() __A = ds.map(preprocess, num_proc=args.num_workers) print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""") # Deduplicate hashes __A = set(ds.unique("hash")) __A = len(uniques) / len(ds) print(F"""Fraction of duplicates: {1-frac:.2%}""") # Deduplicate data and apply heuristics __A = time.time() __A = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args}) print(F"""Time to filter dataset: {time.time()-t_start:.2f}""") print(F"""Size of filtered dataset: {len(ds_filter)}""") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: __A = time.time() __A , __A = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""") print(F"""Size of deduplicate dataset: {len(ds_filter)}""") # Save data in batches of samples_per_file __A = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / "duplicate_clusters.json", "w") as f: json.dump(duplicate_clusters, f) __A = output_dir / "data" data_dir.mkdir(exist_ok=True) __A = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): __A = str(data_dir / F"""file-{file_number+1:012}.json""") __A = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
68
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any: __UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Optional[Any] ) -> int: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Optional[Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # warmup pass to apply optimizations __UpperCAmelCase =pipe(**self.get_dummy_inputs() ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Dict ) -> int: __UpperCAmelCase =ort.SessionOptions() __UpperCAmelCase =False return options def _a ( self : Dict ) -> Any: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _a ( self : List[str] ) -> str: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) __UpperCAmelCase =LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
68
1
from math import factorial class _A : """simple docstring""" def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> Optional[int]: __UpperCAmelCase =real if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[1] * rank else: __UpperCAmelCase =rank def __repr__( self : List[str] ) -> Dict: return ( f'''{self.real}+''' f'''{"+".join(str(__SCREAMING_SNAKE_CASE )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}''' ) def _a ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase =self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , __SCREAMING_SNAKE_CASE ) def __add__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Any: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return Dual(self.real + other , self.duals ) __UpperCAmelCase =self.duals.copy() __UpperCAmelCase =other.duals.copy() if len(__SCREAMING_SNAKE_CASE ) > len(__SCREAMING_SNAKE_CASE ): o_dual.extend([1] * (len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE )) ) elif len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE ): s_dual.extend([1] * (len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE )) ) __UpperCAmelCase =[] for i in range(len(__SCREAMING_SNAKE_CASE ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , __SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = __add__ def __sub__( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: return self + other * -1 def __mul__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =[0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , __SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = __mul__ def __truediv__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , __SCREAMING_SNAKE_CASE ) raise ValueError def __floordiv__( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , __SCREAMING_SNAKE_CASE ) raise ValueError def __pow__( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: if n < 0 or isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError("""power must be a positive integer""" ) if n == 0: return 1 if n == 1: return self __UpperCAmelCase =self for _ in range(n - 1 ): x *= self return x def lowercase__ ( A_: int , A_: List[str] , A_: str ) -> Any: """simple docstring""" if not callable(A_ ): raise ValueError("""differentiate() requires a function as input for func""" ) if not isinstance(A_ , (float, int) ): raise ValueError("""differentiate() requires a float as input for position""" ) if not isinstance(A_ , A_ ): raise ValueError("""differentiate() requires an int as input for order""" ) __UpperCAmelCase =Dual(A_ , 1 ) __UpperCAmelCase =func(A_ ) if order == 0: return result.real return result.duals[order - 1] * factorial(A_ ) if __name__ == "__main__": import doctest doctest.testmod() def lowercase__ ( A_: Any ) -> int: """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
68
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'sequence-classification' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: if type(__SCREAMING_SNAKE_CASE ) == dict: __UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =glue_output_modes[hparams.task] __UpperCAmelCase =glue_tasks_num_labels[hparams.task] super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode ) def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: return self.model(**__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs[0] __UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""] __UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _a ( self : Tuple ) -> List[Any]: __UpperCAmelCase =self.hparams __UpperCAmelCase =processors[args.task]() __UpperCAmelCase =processor.get_labels() for mode in ["train", "dev"]: __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __UpperCAmelCase =( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) __UpperCAmelCase =convert_examples_to_features( __SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader: __UpperCAmelCase ="""dev""" if mode == """test""" else mode __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =outputs[:2] __UpperCAmelCase =logits.detach().cpu().numpy() __UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple: __UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() __UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} __UpperCAmelCase =dict(results.items() ) __UpperCAmelCase =results return ret, preds_list, out_label_list def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) parser.add_argument( """--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def lowercase__ ( ) -> str: """simple docstring""" __UpperCAmelCase =argparse.ArgumentParser() add_generic_args(A_ , os.getcwd() ) __UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() ) __UpperCAmelCase =parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __UpperCAmelCase =os.path.join( """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __UpperCAmelCase =GLUETransformer(A_ ) __UpperCAmelCase =generic_train(A_ , A_ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) ) __UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(A_ ) if __name__ == "__main__": main()
68
1
import numpy as np def lowercase__ ( A_: np.ndarray ) -> np.ndarray: """simple docstring""" return 1 / (1 + np.exp(-vector )) def lowercase__ ( A_: np.ndarray ) -> np.ndarray: """simple docstring""" return vector * sigmoid(A_ ) if __name__ == "__main__": import doctest doctest.testmod()
68
def lowercase__ ( A_: int , A_: int ) -> int: """simple docstring""" return 1 if input_a == input_a else 0 def lowercase__ ( ) -> None: """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
68
1
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def lowercase__ ( A_: dict[int, list[int]] , A_: int , A_: list[bool] ) -> list[int]: """simple docstring""" __UpperCAmelCase =True __UpperCAmelCase =[] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(A_ , A_ , A_ ) order.append(A_ ) return order def lowercase__ ( A_: dict[int, list[int]] , A_: int , A_: list[bool] ) -> list[int]: """simple docstring""" __UpperCAmelCase =True __UpperCAmelCase =[vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(A_ , A_ , A_ ) return component def lowercase__ ( A_: dict[int, list[int]] ) -> list[list[int]]: """simple docstring""" __UpperCAmelCase =len(A_ ) * [False] __UpperCAmelCase ={vert: [] for vert in range(len(A_ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(A_ ) __UpperCAmelCase =[] for i, was_visited in enumerate(A_ ): if not was_visited: order += topology_sort(A_ , A_ , A_ ) __UpperCAmelCase =[] __UpperCAmelCase =len(A_ ) * [False] for i in range(len(A_ ) ): __UpperCAmelCase =order[len(A_ ) - i - 1] if not visited[vert]: __UpperCAmelCase =find_components(A_ , A_ , A_ ) components_list.append(A_ ) return components_list
68
from __future__ import annotations import bisect def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] < item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =0 __UpperCAmelCase =len(A_ ) - 1 while left <= right: __UpperCAmelCase =left + (right - left) // 2 __UpperCAmelCase =sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __UpperCAmelCase =midpoint - 1 else: __UpperCAmelCase =midpoint + 1 return None def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =bisect.bisect_left(A_ , A_ ) if index != len(A_ ) and sorted_collection[index] == item: return index return None def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None: """simple docstring""" if right < left: return None __UpperCAmelCase =left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 ) else: return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ ) if __name__ == "__main__": __A = input("Enter numbers separated by comma:\n").strip() __A = sorted(int(item) for item in user_input.split(",")) __A = int(input("Enter a single number to be found in the list:\n")) __A = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
68
1
from __future__ import annotations from random import random from typing import Generic, TypeVar __A = TypeVar("KT") __A = TypeVar("VT") class _A ( Generic[KT, VT] ): """simple docstring""" def __init__( self : int , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None ) -> List[Any]: __UpperCAmelCase =key __UpperCAmelCase =value __UpperCAmelCase =[] def __repr__( self : str ) -> str: return f'''Node({self.key}: {self.value})''' @property def _a ( self : str ) -> int: return len(self.forward ) class _A ( Generic[KT, VT] ): """simple docstring""" def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16 ) -> Optional[int]: __UpperCAmelCase =Node[KT, VT]() __UpperCAmelCase =0 __UpperCAmelCase =p __UpperCAmelCase =max_level def __str__( self : Optional[int] ) -> str: __UpperCAmelCase =list(self ) if len(__SCREAMING_SNAKE_CASE ) == 0: return f'''SkipList(level={self.level})''' __UpperCAmelCase =max((len(str(__SCREAMING_SNAKE_CASE ) ) for item in items) , default=4 ) __UpperCAmelCase =max(__SCREAMING_SNAKE_CASE , 4 ) + 4 __UpperCAmelCase =self.head __UpperCAmelCase =[] __UpperCAmelCase =node.forward.copy() lines.append(f'''[{node.key}]'''.ljust(__SCREAMING_SNAKE_CASE , """-""" ) + """* """ * len(__SCREAMING_SNAKE_CASE ) ) lines.append(""" """ * label_size + """| """ * len(__SCREAMING_SNAKE_CASE ) ) while len(node.forward ) != 0: __UpperCAmelCase =node.forward[0] lines.append( f'''[{node.key}]'''.ljust(__SCREAMING_SNAKE_CASE , """-""" ) + """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) ) lines.append(""" """ * label_size + """| """ * len(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =node.forward lines.append("""None""".ljust(__SCREAMING_SNAKE_CASE ) + """* """ * len(__SCREAMING_SNAKE_CASE ) ) return f'''SkipList(level={self.level})\n''' + "\n".join(__SCREAMING_SNAKE_CASE ) def __iter__( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.head while len(node.forward ) != 0: yield node.forward[0].key __UpperCAmelCase =node.forward[0] def _a ( self : Dict ) -> int: __UpperCAmelCase =1 while random() < self.p and level < self.max_level: level += 1 return level def _a ( self : int , __SCREAMING_SNAKE_CASE : List[str] ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]: __UpperCAmelCase =[] __UpperCAmelCase =self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __UpperCAmelCase =node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__SCREAMING_SNAKE_CASE ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _a ( self : Any , __SCREAMING_SNAKE_CASE : KT ) -> Any: __UpperCAmelCase , __UpperCAmelCase =self._locate_node(__SCREAMING_SNAKE_CASE ) if node is not None: for i, update_node in enumerate(__SCREAMING_SNAKE_CASE ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __UpperCAmelCase =node.forward[i] else: __UpperCAmelCase =update_node.forward[:i] def _a ( self : Dict , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT ) -> Any: __UpperCAmelCase , __UpperCAmelCase =self._locate_node(__SCREAMING_SNAKE_CASE ) if node is not None: __UpperCAmelCase =value else: __UpperCAmelCase =self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE ): update_vector.append(self.head ) __UpperCAmelCase =level __UpperCAmelCase =Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =new_node def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : VT ) -> VT | None: __UpperCAmelCase , __UpperCAmelCase =self._locate_node(__SCREAMING_SNAKE_CASE ) if node is not None: return node.value return None def lowercase__ ( ) -> Any: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert("""Key1""" , 3 ) skip_list.insert("""Key2""" , 12 ) skip_list.insert("""Key3""" , 41 ) skip_list.insert("""Key4""" , -19 ) __UpperCAmelCase =skip_list.head __UpperCAmelCase ={} while node.level != 0: __UpperCAmelCase =node.forward[0] __UpperCAmelCase =node.value assert len(A_ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def lowercase__ ( ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert("""Key1""" , 10 ) skip_list.insert("""Key1""" , 12 ) skip_list.insert("""Key5""" , 7 ) skip_list.insert("""Key7""" , 10 ) skip_list.insert("""Key10""" , 5 ) skip_list.insert("""Key7""" , 7 ) skip_list.insert("""Key5""" , 5 ) skip_list.insert("""Key10""" , 10 ) __UpperCAmelCase =skip_list.head __UpperCAmelCase ={} while node.level != 0: __UpperCAmelCase =node.forward[0] __UpperCAmelCase =node.value if len(A_ ) != 4: print() assert len(A_ ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =SkipList() assert skip_list.find("""Some key""" ) is None def lowercase__ ( ) -> int: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert("""Key2""" , 20 ) assert skip_list.find("""Key2""" ) == 20 skip_list.insert("""Some Key""" , 10 ) skip_list.insert("""Key2""" , 8 ) skip_list.insert("""V""" , 13 ) assert skip_list.find("""Y""" ) is None assert skip_list.find("""Key2""" ) == 8 assert skip_list.find("""Some Key""" ) == 10 assert skip_list.find("""V""" ) == 13 def lowercase__ ( ) -> Tuple: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.delete("""Some key""" ) assert len(skip_list.head.forward ) == 0 def lowercase__ ( ) -> Dict: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""Key2""" ) is None def lowercase__ ( ) -> Any: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) == 14 assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""X""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key1""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) is None def lowercase__ ( ) -> Dict: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 142 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""X""" ) def traverse_keys(A_: Dict ): yield node.key for forward_node in node.forward: yield from traverse_keys(A_ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowercase__ ( ) -> Optional[Any]: """simple docstring""" def is_sorted(A_: Any ): return all(next_item >= item for item, next_item in zip(A_ , lst[1:] ) ) __UpperCAmelCase =SkipList() for i in range(10 ): skip_list.insert(A_ , A_ ) assert is_sorted(list(A_ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(A_ ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(A_ ) ) def lowercase__ ( ) -> Dict: """simple docstring""" for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowercase__ ( ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =SkipList() skip_list.insert(2 , """2""" ) skip_list.insert(4 , """4""" ) skip_list.insert(6 , """4""" ) skip_list.insert(4 , """5""" ) skip_list.insert(8 , """4""" ) skip_list.insert(9 , """4""" ) skip_list.delete(4 ) print(A_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
68
from typing import List from .keymap import KEYMAP, get_character def lowercase__ ( A_: str ) -> str: """simple docstring""" def decorator(A_: int ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += [key] setattr(A_ , """handle_key""" , A_ ) return func return decorator def lowercase__ ( *A_: List[str] ) -> Optional[int]: """simple docstring""" def decorator(A_: Tuple ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += keys setattr(A_ , """handle_key""" , A_ ) return func return decorator class _A ( UpperCamelCase ): """simple docstring""" def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int: __UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ): setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} ) setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] ) for key in handled_keys: __UpperCAmelCase =value return new_cls @staticmethod def _a ( cls : Dict ) -> List[Any]: __UpperCAmelCase =get_character() if char != KEYMAP["undefined"]: __UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE ) if handler: __UpperCAmelCase =char return handler(cls ) else: return None def lowercase__ ( cls: str ) -> int: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
68
1
def lowercase__ ( A_: int = 1 , A_: int = 1000 ) -> int: """simple docstring""" __UpperCAmelCase =1 __UpperCAmelCase =0 for divide_by_number in range(A_ , digit + 1 ): __UpperCAmelCase =[] __UpperCAmelCase =numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(A_ ): __UpperCAmelCase =len(A_ ) __UpperCAmelCase =divide_by_number else: has_been_divided.append(A_ ) __UpperCAmelCase =now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
68
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
68
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "andreasmadsen/efficient_mlm_m0.40": ( "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json" ), } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Dict = 'roberta-prelayernorm' def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=50265 , __SCREAMING_SNAKE_CASE : Optional[int]=768 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=3072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1e-12 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]="absolute" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : str , ) -> Any: super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =hidden_act __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =initializer_range __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =position_embedding_type __UpperCAmelCase =use_cache __UpperCAmelCase =classifier_dropout class _A ( UpperCamelCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""} else: __UpperCAmelCase ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
68
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =seq_length __UpperCAmelCase =is_training __UpperCAmelCase =use_attention_mask __UpperCAmelCase =use_token_type_ids __UpperCAmelCase =use_labels __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =initializer_range __UpperCAmelCase =num_choices def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase =None if self.use_attention_mask: __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase =None if self.use_token_type_ids: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self : List[str] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase =True __UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =FlaxRobertaModelTester(self ) @slow def _a ( self : Optional[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
68
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __UpperCAmelCase =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =-1 __UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase =TextStreamer(__SCREAMING_SNAKE_CASE ) model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase =cs.out[:-1] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __UpperCAmelCase =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =-1 __UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase =TextIteratorStreamer(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} __UpperCAmelCase =Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE ) thread.start() __UpperCAmelCase ="""""" for new_text in streamer: streamer_text += new_text self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> int: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __UpperCAmelCase =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =-1 __UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase =tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase =TextStreamer(__SCREAMING_SNAKE_CASE , skip_prompt=__SCREAMING_SNAKE_CASE ) model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase =cs.out[:-1] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Any: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase =AutoTokenizer.from_pretrained("""distilgpt2""" ) __UpperCAmelCase =AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =-1 __UpperCAmelCase =torch.ones((1, 5) , device=__SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase =TextStreamer(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase =cs.out[:-1] # Remove the final "\n" __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __UpperCAmelCase =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =-1 __UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TextIteratorStreamer(__SCREAMING_SNAKE_CASE , timeout=0.001 ) __UpperCAmelCase ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} __UpperCAmelCase =Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__SCREAMING_SNAKE_CASE ): __UpperCAmelCase ="""""" for new_text in streamer: streamer_text += new_text
68
from __future__ import annotations def lowercase__ ( A_: list[list[int]] ) -> int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(A_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(A_ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
68
1
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase =self.model.config else: __UpperCAmelCase =config __UpperCAmelCase =data_args __UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase =label_smoothed_nll_loss def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any: if self.optimizer is None: __UpperCAmelCase =["""bias""", """LayerNorm.weight"""] __UpperCAmelCase =[ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase =Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase =Adafactor __UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase =AdamW __UpperCAmelCase ={ """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase =self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase =OSS( params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: __UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase =schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase =schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) return scheduler def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 ) __UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: __UpperCAmelCase =inputs.pop("""labels""" ) __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return loss def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: __UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase =self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) __UpperCAmelCase =inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: # If PAD token is not defined at least EOS token has to be defined __UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase =pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase =tensor return padded_tensor
68
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: __UpperCAmelCase =ksize + 1 __UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(A_ ): for x in range(A_ ): # distance from center __UpperCAmelCase =x - ksize // 2 __UpperCAmelCase =y - ksize // 2 # degree to radiant __UpperCAmelCase =theta / 180 * np.pi __UpperCAmelCase =np.cos(_theta ) __UpperCAmelCase =np.sin(_theta ) # get kernel x __UpperCAmelCase =cos_theta * px + sin_theta * py # get kernel y __UpperCAmelCase =-sin_theta * px + cos_theta * py # fill kernel __UpperCAmelCase =np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image __A = imread("../image_data/lena.jpg") # turn image in gray scale value __A = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges __A = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: __A = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) __A = out / out.max() * 2_55 __A = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
68
1
def lowercase__ ( A_: float , A_: float , A_: float , A_: float , A_: float , ) -> float: """simple docstring""" __UpperCAmelCase =[redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: __UpperCAmelCase =1 - (matter_density + radiation_density + dark_energy) __UpperCAmelCase =( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) __UpperCAmelCase =hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation __A = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
68
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =image_size __UpperCAmelCase =patch_size __UpperCAmelCase =num_channels __UpperCAmelCase =embed_dim __UpperCAmelCase =depths __UpperCAmelCase =num_heads __UpperCAmelCase =window_size __UpperCAmelCase =mlp_ratio __UpperCAmelCase =qkv_bias __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =drop_path_rate __UpperCAmelCase =hidden_act __UpperCAmelCase =use_absolute_embeddings __UpperCAmelCase =patch_norm __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =initializer_range __UpperCAmelCase =is_training __UpperCAmelCase =scope __UpperCAmelCase =use_labels __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =encoder_stride def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase =None if self.use_labels: __UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase =self.get_config() return config, pixel_values, labels def _a ( self : List[Any] ) -> Optional[Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: __UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple: __UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase =1 __UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __UpperCAmelCase =self.type_sequence_label_size __UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : List[str] ) -> Tuple: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCamelCase : Tuple = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Dict = False lowerCamelCase : Tuple = False lowerCamelCase : List[str] = False lowerCamelCase : Tuple = False def _a ( self : str ) -> str: __UpperCAmelCase =SwinvaModelTester(self ) __UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 ) def _a ( self : List[Any] ) -> Optional[int]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : str ) -> str: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def _a ( self : Tuple ) -> Tuple: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def _a ( self : Optional[Any] ) -> int: pass def _a ( self : Tuple ) -> int: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _a ( self : str ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase =[*signature.parameters.keys()] __UpperCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =True for model_class in self.all_model_classes: __UpperCAmelCase =True __UpperCAmelCase =False __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions __UpperCAmelCase =len(self.model_tester.depths ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase =True __UpperCAmelCase =config.window_size**2 __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __UpperCAmelCase =True __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase =self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase =2 self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.hidden_states __UpperCAmelCase =getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # Swinv2 has a different seq_length __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase =outputs.reshaped_hidden_states self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape __UpperCAmelCase =( reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _a ( self : str ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =3 __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Dict: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : int ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( self : Dict ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ) -> Dict: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def _a ( self : int ) -> Optional[int]: __UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) # verify the logits __UpperCAmelCase =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
68
1
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _A : """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Any=32 * 8 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 * 8 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : Dict=64 , ) -> Any: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =is_training __UpperCAmelCase =use_auxiliary_loss __UpperCAmelCase =num_queries __UpperCAmelCase =num_channels __UpperCAmelCase =min_size __UpperCAmelCase =max_size __UpperCAmelCase =num_labels __UpperCAmelCase =hidden_dim __UpperCAmelCase =hidden_dim def _a ( self : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) > 0.5 ).float() __UpperCAmelCase =(torch.rand((self.batch_size, self.num_labels) , device=__SCREAMING_SNAKE_CASE ) > 0.5).long() __UpperCAmelCase =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _a ( self : Optional[int] ) -> str: __UpperCAmelCase =MaskaFormerConfig( hidden_size=self.hidden_dim , ) __UpperCAmelCase =self.num_queries __UpperCAmelCase =self.num_labels __UpperCAmelCase =[1, 1, 1, 1] __UpperCAmelCase =self.num_channels __UpperCAmelCase =64 __UpperCAmelCase =128 __UpperCAmelCase =self.hidden_dim __UpperCAmelCase =self.hidden_dim __UpperCAmelCase =self.hidden_dim return config def _a ( self : List[str] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def _a ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: __UpperCAmelCase =output.encoder_hidden_states __UpperCAmelCase =output.pixel_decoder_hidden_states __UpperCAmelCase =output.transformer_decoder_hidden_states self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , config.decoder_layers ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]=False ) -> List[str]: with torch.no_grad(): __UpperCAmelCase =MaskaFormerModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any: __UpperCAmelCase =MaskaFormerForUniversalSegmentation(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() def comm_check_on_output(__SCREAMING_SNAKE_CASE : Any ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __UpperCAmelCase =model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) comm_check_on_output(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model( pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ) comm_check_on_output(__SCREAMING_SNAKE_CASE ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Any = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () lowerCamelCase : Optional[int] = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {} lowerCamelCase : str = False lowerCamelCase : Any = False lowerCamelCase : int = False lowerCamelCase : List[str] = False def _a ( self : str ) -> str: __UpperCAmelCase =MaskaFormerModelTester(self ) __UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Tuple: self.config_tester.run_common_tests() def _a ( self : Any ) -> Any: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> List[str]: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def _a ( self : Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def _a ( self : str ) -> List[str]: pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def _a ( self : Any ) -> Any: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def _a ( self : Union[str, Any] ) -> int: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _a ( self : List[Any] ) -> int: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a ( self : Union[str, Any] ) -> Tuple: pass def _a ( self : str ) -> Any: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase =[*signature.parameters.keys()] __UpperCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Dict ) -> str: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __UpperCAmelCase =MaskaFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =(self.model_tester.min_size,) * 2 __UpperCAmelCase ={ """pixel_values""": torch.randn((2, 3, *size) , device=__SCREAMING_SNAKE_CASE ), """mask_labels""": torch.randn((2, 10, *size) , device=__SCREAMING_SNAKE_CASE ), """class_labels""": torch.zeros(2 , 10 , device=__SCREAMING_SNAKE_CASE ).long(), } __UpperCAmelCase =self.model_tester.get_config() __UpperCAmelCase =MaskaFormerForUniversalSegmentation(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.loss is not None ) def _a ( self : Any ) -> int: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.attentions is not None ) def _a ( self : Optional[int] ) -> Optional[Any]: if not self.model_tester.is_training: return __UpperCAmelCase =self.all_model_classes[1] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.train() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ).loss loss.backward() def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =self.all_model_classes[1] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() __UpperCAmelCase =True __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) model.train() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __UpperCAmelCase =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __UpperCAmelCase =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __UpperCAmelCase =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1E-4 def lowercase__ ( ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class _A ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[int] ) -> str: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _a ( self : int ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _a ( self : Optional[Any] ) -> Any: __UpperCAmelCase =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =prepare_img() __UpperCAmelCase =image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 384, 384) ) with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor( [[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =torch.tensor( [[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =torch.tensor( [[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =prepare_img() __UpperCAmelCase =image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 384, 384) ) with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) # masks_queries_logits __UpperCAmelCase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __UpperCAmelCase =[ [-8.7_839, -9.0_056, -8.8_121], [-7.4_104, -7.0_313, -6.5_401], [-6.6_105, -6.3_427, -6.4_675], ] __UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) # class_queries_logits __UpperCAmelCase =outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __UpperCAmelCase =torch.tensor( [ [1.8_324, -8.0_835, -4.1_922], [0.8_450, -9.0_050, -3.6_053], [0.3_045, -7.7_293, -3.0_275], ] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) def _a ( self : int ) -> int: __UpperCAmelCase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) __UpperCAmelCase =inputs["""pixel_values"""].to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =[el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["""mask_labels"""]] __UpperCAmelCase =[el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["""class_labels"""]] with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.loss is not None )
68
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __A = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : int = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None: __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase =kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) __UpperCAmelCase ="""None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token __UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase =unk_token if pad_token is None else pad_token __UpperCAmelCase =eos_token if bos_token is None else bos_token else: __UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token __UpperCAmelCase ="""<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =do_lower_case __UpperCAmelCase =remove_space __UpperCAmelCase =keep_accents __UpperCAmelCase =vocab_file __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace normalization in input texts # fmt : off __UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __UpperCAmelCase =re.compile( f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' ) def __getstate__( self : Any ) -> str: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Union[str, Any] ) -> int: return len(self.sp_model ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str: __UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization __UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE ) return text def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: return out_string def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str: __UpperCAmelCase =[] __UpperCAmelCase ="""""" __UpperCAmelCase =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __UpperCAmelCase =True __UpperCAmelCase =[] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Any ) -> Dict[str, int]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: __UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __UpperCAmelCase =( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=__SCREAMING_SNAKE_CASE )
68
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A = { "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]: __UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )] if identifier is not None: __UpperCAmelCase =[file for file in files if identifier in file] if n_identifier is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for n_ in n_identifier: __UpperCAmelCase =[file for file in files if n_ not in file] else: __UpperCAmelCase =[file for file in files if n_identifier not in file] __UpperCAmelCase =ignore_files or [] ignore_files.append("""__init__.py""" ) __UpperCAmelCase =[file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __SCREAMING_SNAKE_CASE ) if only_modules: __UpperCAmelCase =file.split(""".""" )[0] try: __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _a ( self : Optional[Any] ) -> List[str]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""modeling""" __UpperCAmelCase =[ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""tokenization""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""configuration""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Tuple: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase =Path("""docs/source""" ) __UpperCAmelCase =["""favicon.ico"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
68
1
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __A = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]: """simple docstring""" if rng is None: __UpperCAmelCase =random.Random() __UpperCAmelCase =1 for dim in shape: total_dims *= dim __UpperCAmelCase =[] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) __UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any: """simple docstring""" __UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch __UpperCAmelCase =1 return attn_mask @require_flax class _A : """simple docstring""" lowerCamelCase : Optional[Any] = None lowerCamelCase : int = () def _a ( self : str ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __UpperCAmelCase =2 __UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2 __UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length] __UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __UpperCAmelCase =input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __UpperCAmelCase =config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Union[str, Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =0 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params ) __UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences __UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : Union[str, Any] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length __UpperCAmelCase =0.8 __UpperCAmelCase =10 __UpperCAmelCase =0.3 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =2 __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : int ) -> Any: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) __UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) __UpperCAmelCase ="""Hello world""" __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ): model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ): __UpperCAmelCase ={"""foo""": """bar"""} model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
68
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __A = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]: """simple docstring""" if rng is None: __UpperCAmelCase =random.Random() __UpperCAmelCase =1 for dim in shape: total_dims *= dim __UpperCAmelCase =[] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) __UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any: """simple docstring""" __UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch __UpperCAmelCase =1 return attn_mask @require_flax class _A : """simple docstring""" lowerCamelCase : Optional[Any] = None lowerCamelCase : int = () def _a ( self : str ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __UpperCAmelCase =2 __UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2 __UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length] __UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __UpperCAmelCase =input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __UpperCAmelCase =config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Union[str, Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =0 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params ) __UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences __UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : Union[str, Any] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length __UpperCAmelCase =0.8 __UpperCAmelCase =10 __UpperCAmelCase =0.3 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =2 __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : int ) -> Any: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) __UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) __UpperCAmelCase ="""Hello world""" __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ): model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ): __UpperCAmelCase ={"""foo""": """bar"""} model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
68
1
from __future__ import annotations __A = [True] * 1_00_00_01 __A = 2 while i * i <= 1_00_00_00: if seive[i]: for j in range(i * i, 1_00_00_01, i): __A = False i += 1 def lowercase__ ( A_: int ) -> bool: """simple docstring""" return seive[n] def lowercase__ ( A_: int ) -> bool: """simple docstring""" return any(digit in """02468""" for digit in str(A_ ) ) def lowercase__ ( A_: int = 1000000 ) -> list[int]: """simple docstring""" __UpperCAmelCase =[2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(A_ ) and not contains_an_even_digit(A_ ): __UpperCAmelCase =str(A_ ) __UpperCAmelCase =[int(str_num[j:] + str_num[:j] ) for j in range(len(A_ ) )] if all(is_prime(A_ ) for i in list_nums ): result.append(A_ ) return result def lowercase__ ( ) -> int: """simple docstring""" return len(find_circular_primes() ) if __name__ == "__main__": print(F"""{len(find_circular_primes()) = }""")
68
from __future__ import annotations from collections.abc import Iterator class _A : """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None: __UpperCAmelCase =value __UpperCAmelCase =None __UpperCAmelCase =None class _A : """simple docstring""" def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None: __UpperCAmelCase =tree def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : int ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
68
1
def lowercase__ ( A_: list[int] ) -> list[int]: """simple docstring""" __UpperCAmelCase =len(A_ ) for i in range(A_ ): for j in range(i + 1 , A_ ): if numbers[j] < numbers[i]: __UpperCAmelCase , __UpperCAmelCase =numbers[j], numbers[i] return numbers if __name__ == "__main__": __A = input("Enter numbers separated by a comma:\n").strip() __A = [int(item) for item in user_input.split(",")] print(exchange_sort(unsorted))
68
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def lowercase__ ( A_: Union[str, Any] ) -> List[Any]: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(F'''role {role_name} already exists. Using existing one''' ) def lowercase__ ( A_: Dict ) -> Any: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =_ask_options( """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , ) __UpperCAmelCase =None if credentials_configuration == 0: __UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" ) __UpperCAmelCase =aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __UpperCAmelCase =_ask_field("""AWS Access Key ID: """ ) __UpperCAmelCase =aws_access_key_id __UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ ) __UpperCAmelCase =aws_secret_access_key __UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" ) __UpperCAmelCase =aws_region __UpperCAmelCase =_ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , ) if role_management == 0: __UpperCAmelCase =_ask_field("""Enter your IAM role name: """ ) else: __UpperCAmelCase ="""accelerate_sagemaker_execution_role""" print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __UpperCAmelCase =_ask_field( """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_custom_docker_image: __UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() ) __UpperCAmelCase =_ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_inputs_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_metrics_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_options( """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , ) __UpperCAmelCase ={} __UpperCAmelCase =_ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_dynamo: __UpperCAmelCase ="""dynamo_""" __UpperCAmelCase =_ask_options( """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) __UpperCAmelCase =_ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_custom_options: __UpperCAmelCase =_ask_options( """Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , ) __UpperCAmelCase =_ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =_ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase ="""Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __UpperCAmelCase =_ask_options( A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" ) __UpperCAmelCase =1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __UpperCAmelCase =_ask_field( """How many machines do you want use? [1]: """ , A_ , default=1 , ) __UpperCAmelCase =_ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
68
1
from math import ceil def lowercase__ ( A_: Optional[Any] , A_: Tuple ) -> Tuple: """simple docstring""" __UpperCAmelCase =list(range(0 , A_ ) ) __UpperCAmelCase =[item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check __UpperCAmelCase =[] for i in device_map_blocks: if device_map_blocks.count(A_ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(A_ ) # Missing blocks __UpperCAmelCase =[i for i in blocks if i not in device_map_blocks] __UpperCAmelCase =[i for i in device_map_blocks if i not in blocks] if len(A_ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(A_ ) ) if len(A_ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(A_ ) ) if len(A_ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(A_ ) ) def lowercase__ ( A_: Optional[Any] , A_: Optional[int] ) -> Dict: """simple docstring""" __UpperCAmelCase =list(range(A_ ) ) __UpperCAmelCase =int(ceil(n_layers / len(A_ ) ) ) __UpperCAmelCase =[layers[i : i + n_blocks] for i in range(0 , A_ , A_ )] return dict(zip(A_ , A_ ) )
68
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = 'ctrl' lowerCamelCase : Any = ['past_key_values'] lowerCamelCase : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any: __UpperCAmelCase =vocab_size __UpperCAmelCase =n_positions __UpperCAmelCase =n_embd __UpperCAmelCase =n_layer __UpperCAmelCase =n_head __UpperCAmelCase =dff __UpperCAmelCase =resid_pdrop __UpperCAmelCase =embd_pdrop __UpperCAmelCase =layer_norm_epsilon __UpperCAmelCase =initializer_range __UpperCAmelCase =use_cache super().__init__(**__SCREAMING_SNAKE_CASE )
68
1
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : int = ['image_processor', 'tokenizer'] lowerCamelCase : Union[str, Any] = 'ViltImageProcessor' lowerCamelCase : List[Any] = ('BertTokenizer', 'BertTokenizerFast') def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: __UpperCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =kwargs.pop("""feature_extractor""" ) __UpperCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.image_processor def __call__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> BatchEncoding: __UpperCAmelCase =self.tokenizer( text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) # add pixel_values + pixel_mask __UpperCAmelCase =self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) encoding.update(__SCREAMING_SNAKE_CASE ) return encoding def _a ( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Any ) -> str: return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =self.tokenizer.model_input_names __UpperCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : Dict ) -> Any: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def _a ( self : str ) -> int: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __SCREAMING_SNAKE_CASE , ) return self.image_processor
68
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __UpperCAmelCase =k.replace(A_ , A_ ) if k.startswith("""encoder""" ): __UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" ) __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" ) return k def lowercase__ ( A_: Tuple ) -> str: """simple docstring""" __UpperCAmelCase =[ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __UpperCAmelCase =sd.pop(A_ ) __UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __UpperCAmelCase =v __A = ["START"] @torch.no_grad() def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =torch.load(A_ , map_location="""cpu""" ) __UpperCAmelCase =model["""model"""] __UpperCAmelCase =BlenderbotConfig.from_json_file(A_ ) __UpperCAmelCase =BlenderbotForConditionalGeneration(A_ ) __UpperCAmelCase =m.model.state_dict().keys() __UpperCAmelCase =[] __UpperCAmelCase ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue __UpperCAmelCase =rename_state_dict_key(A_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __UpperCAmelCase =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(A_ ) m.model.load_state_dict(A_ , strict=A_ ) m.half() m.save_pretrained(A_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __A = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
68
1
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = BertTokenizer lowerCamelCase : Any = BertTokenizerFast lowerCamelCase : List[str] = True lowerCamelCase : Any = True lowerCamelCase : str = filter_non_english def _a ( self : Dict ) -> str: super().setUp() __UpperCAmelCase =[ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __UpperCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> Tuple: __UpperCAmelCase ="""UNwant\u00E9d,running""" __UpperCAmelCase ="""unwanted, running""" return input_text, output_text def _a ( self : List[str] ) -> Union[str, Any]: __UpperCAmelCase =self.tokenizer_class(self.vocab_file ) __UpperCAmelCase =tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [9, 6, 7, 12, 10, 11] ) def _a ( self : int ) -> Dict: if not self.test_rust_tokenizer: return __UpperCAmelCase =self.get_tokenizer() __UpperCAmelCase =self.get_rust_tokenizer() __UpperCAmelCase ="""UNwant\u00E9d,running""" __UpperCAmelCase =tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_rust_tokenizer() __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # With lower casing __UpperCAmelCase =self.get_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_rust_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""UNwant\u00E9d,running""" __UpperCAmelCase =tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_rust_tokenizer() __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def _a ( self : Dict ) -> List[Any]: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a ( self : Optional[Any] ) -> int: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def _a ( self : Optional[int] ) -> Any: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a ( self : Dict ) -> int: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a ( self : int ) -> Tuple: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a ( self : Optional[int] ) -> Optional[int]: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a ( self : Any ) -> int: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase =BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def _a ( self : int ) -> Any: __UpperCAmelCase =BasicTokenizer() __UpperCAmelCase ="""a\n'll !!to?'d of, can't.""" __UpperCAmelCase =["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""] self.assertListEqual(tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Tuple: __UpperCAmelCase =["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] __UpperCAmelCase ={} for i, token in enumerate(__SCREAMING_SNAKE_CASE ): __UpperCAmelCase =i __UpperCAmelCase =WordpieceTokenizer(vocab=__SCREAMING_SNAKE_CASE , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def _a ( self : Optional[Any] ) -> Dict: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def _a ( self : List[Any] ) -> List[Any]: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def _a ( self : Optional[int] ) -> Union[str, Any]: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.get_tokenizer() __UpperCAmelCase =self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) self.assertListEqual( [rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) @slow def _a ( self : Any ) -> List[Any]: __UpperCAmelCase =self.tokenizer_class.from_pretrained("""bert-base-uncased""" ) __UpperCAmelCase =tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _a ( self : List[str] ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __UpperCAmelCase =tokenizer_r.encode_plus( __SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =tokenizer_r.do_lower_case if hasattr(__SCREAMING_SNAKE_CASE , """do_lower_case""" ) else False __UpperCAmelCase =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def _a ( self : Dict ) -> Optional[int]: __UpperCAmelCase =["""的""", """人""", """有"""] __UpperCAmelCase ="""""".join(__SCREAMING_SNAKE_CASE ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase =True __UpperCAmelCase =self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_p.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_r.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_r.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_p.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =False __UpperCAmelCase =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_r.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_p.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_r.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tokenizer_p.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) # it is expected that only the first Chinese character is not preceded by "##". __UpperCAmelCase =[ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__SCREAMING_SNAKE_CASE ) ] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
68
from itertools import permutations def lowercase__ ( A_: tuple ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __UpperCAmelCase =[7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase__ ( A_: int = 10 ) -> int: """simple docstring""" return sum( int("""""".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
68
1
def lowercase__ ( A_: int , A_: list ) -> Any: """simple docstring""" _enforce_args(A_ , A_ ) if n == 0: return 0 __UpperCAmelCase =float("""-inf""" ) for i in range(1 , n + 1 ): __UpperCAmelCase =max( A_ , prices[i - 1] + naive_cut_rod_recursive(n - i , A_ ) ) return max_revue def lowercase__ ( A_: int , A_: list ) -> List[str]: """simple docstring""" _enforce_args(A_ , A_ ) __UpperCAmelCase =[float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(A_ , A_ , A_ ) def lowercase__ ( A_: int , A_: list , A_: list ) -> Dict: """simple docstring""" if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: __UpperCAmelCase =float("""-inf""" ) for i in range(1 , n + 1 ): __UpperCAmelCase =max( A_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , A_ , A_ ) , ) __UpperCAmelCase =max_revenue return max_rev[n] def lowercase__ ( A_: int , A_: list ) -> Optional[int]: """simple docstring""" _enforce_args(A_ , A_ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. __UpperCAmelCase =[float("""-inf""" ) for _ in range(n + 1 )] __UpperCAmelCase =0 for i in range(1 , n + 1 ): __UpperCAmelCase =max_rev[i] for j in range(1 , i + 1 ): __UpperCAmelCase =max(A_ , prices[j - 1] + max_rev[i - j] ) __UpperCAmelCase =max_revenue_i return max_rev[n] def lowercase__ ( A_: int , A_: list ) -> Union[str, Any]: """simple docstring""" if n < 0: __UpperCAmelCase =F'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(A_ ) if n > len(A_ ): __UpperCAmelCase =( """Each integral piece of rod must have a corresponding price. """ F'''Got n = {n} but length of prices = {len(A_ )}''' ) raise ValueError(A_ ) def lowercase__ ( ) -> int: """simple docstring""" __UpperCAmelCase =[6, 10, 12, 15, 20, 23] __UpperCAmelCase =len(A_ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. __UpperCAmelCase =36 __UpperCAmelCase =top_down_cut_rod(A_ , A_ ) __UpperCAmelCase =bottom_up_cut_rod(A_ , A_ ) __UpperCAmelCase =naive_cut_rod_recursive(A_ , A_ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
68
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar("T") def lowercase__ ( A_: int ) -> int: """simple docstring""" return (position - 1) // 2 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 1 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 2 class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[str] ) -> None: __UpperCAmelCase =[] __UpperCAmelCase ={} __UpperCAmelCase =0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Dict ) -> str: return str(self.heap ) def _a ( self : Optional[int] ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __UpperCAmelCase =self.elements self.elements += 1 self._bubble_up(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __UpperCAmelCase , __UpperCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __UpperCAmelCase , __UpperCAmelCase =self.heap[0] self._bubble_down(__SCREAMING_SNAKE_CASE ) return elem def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Update the weight of the given key __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase =(elem, weight) if position > 0: __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __UpperCAmelCase =self.position_map[elem] if curr_pos == 0: return None __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_up(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements and child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) else: return None if child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: # Swap the nodes at the given positions __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase , __UpperCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) __UpperCAmelCase =nodea_pos __UpperCAmelCase =nodea_pos class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[Any] ) -> None: __UpperCAmelCase ={} __UpperCAmelCase =0 def __repr__( self : Tuple ) -> str: return str(self.connections ) def __len__( self : str ) -> int: return self.nodes def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __UpperCAmelCase ={} self.nodes += 1 def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__SCREAMING_SNAKE_CASE ) self.add_node(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =weight __UpperCAmelCase =weight def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: """simple docstring""" __UpperCAmelCase ={node: maxsize for node in graph.connections} __UpperCAmelCase ={node: None for node in graph.connections} __UpperCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(A_ , A_ ) if priority_queue.is_empty(): return dist, parent # initialization __UpperCAmelCase =priority_queue.extract_min() __UpperCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): __UpperCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node return dist, parent
68
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"} class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = 'openai-gpt' lowerCamelCase : int = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : str=40478 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=768 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : str=1e-5 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Tuple="cls_index" , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=0.1 , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Optional[int]: __UpperCAmelCase =vocab_size __UpperCAmelCase =n_positions __UpperCAmelCase =n_embd __UpperCAmelCase =n_layer __UpperCAmelCase =n_head __UpperCAmelCase =afn __UpperCAmelCase =resid_pdrop __UpperCAmelCase =embd_pdrop __UpperCAmelCase =attn_pdrop __UpperCAmelCase =layer_norm_epsilon __UpperCAmelCase =initializer_range __UpperCAmelCase =summary_type __UpperCAmelCase =summary_use_proj __UpperCAmelCase =summary_activation __UpperCAmelCase =summary_first_dropout __UpperCAmelCase =summary_proj_to_labels super().__init__(**__SCREAMING_SNAKE_CASE )
68
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A = logging.get_logger(__name__) @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase =deprecated_arg[3:] __UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCamelCase : str = field( default=UpperCamelCase , metadata={'help': 'Name of TPU'} , ) lowerCamelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} ) lowerCamelCase : bool = field( default=UpperCamelCase , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase =None if self.tpu: try: if self.tpu_name: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase =None return tpu @cached_property def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self : str ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self : Dict ) -> Optional[int]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self : List[str] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[str] ) -> bool: return self.n_gpu > 0
68
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json" ), }, } __A = { "moussaKam/mbarthez": 10_24, "moussaKam/barthez": 10_24, "moussaKam/barthez-orangesum-title": 10_24, } __A = "▁" class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Any = VOCAB_FILES_NAMES lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Dict = ['input_ids', 'attention_mask'] lowerCamelCase : Any = BarthezTokenizer def __init__( self : Any , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any="<s>" , __SCREAMING_SNAKE_CASE : str="</s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : Any="<s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="<unk>" , __SCREAMING_SNAKE_CASE : List[str]="<pad>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<mask>" , **__SCREAMING_SNAKE_CASE : Dict , ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =vocab_file __UpperCAmelCase =False if not self.vocab_file else True def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase =[self.cls_token_id] __UpperCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase =[self.sep_token_id] __UpperCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
68
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Dict: torch.manual_seed(0 ) __UpperCAmelCase =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _a ( self : int ) -> Union[str, Any]: __UpperCAmelCase =self.dummy_uncond_unet __UpperCAmelCase =ScoreSdeVeScheduler() __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[ 0 ] __UpperCAmelCase =image[0, -3:, -3:, -1] __UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ) -> int: __UpperCAmelCase ="""google/ncsnpp-church-256""" __UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
68
1
from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __A = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def lowercase__ ( A_: str , A_: Any=None ) -> Any: """simple docstring""" require_version(deps[pkg] , A_ )
68
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase =self.model.config else: __UpperCAmelCase =config __UpperCAmelCase =data_args __UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase =label_smoothed_nll_loss def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any: if self.optimizer is None: __UpperCAmelCase =["""bias""", """LayerNorm.weight"""] __UpperCAmelCase =[ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase =Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase =Adafactor __UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase =AdamW __UpperCAmelCase ={ """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase =self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase =OSS( params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: __UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase =schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase =schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) return scheduler def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 ) __UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: __UpperCAmelCase =inputs.pop("""labels""" ) __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return loss def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: __UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase =self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) __UpperCAmelCase =inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: # If PAD token is not defined at least EOS token has to be defined __UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase =pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase =tensor return padded_tensor
68
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __A = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase__ ( A_: int , A_: Optional[int] , A_: Optional[int] , A_: int , A_: List[str] , A_: Union[str, Any] ) -> Any: """simple docstring""" for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __UpperCAmelCase ="""lm_head""" __UpperCAmelCase =getattr(A_ , A_ ) if weight_type is not None: __UpperCAmelCase =getattr(A_ , A_ ).shape else: __UpperCAmelCase =hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __UpperCAmelCase =value elif weight_type == "weight_g": __UpperCAmelCase =value elif weight_type == "weight_v": __UpperCAmelCase =value elif weight_type == "bias": __UpperCAmelCase =value else: __UpperCAmelCase =value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase__ ( A_: str , A_: int , A_: int ) -> List[str]: """simple docstring""" __UpperCAmelCase =[] __UpperCAmelCase =fairseq_model.state_dict() __UpperCAmelCase =hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __UpperCAmelCase =False if "conv_layers" in name: load_conv_layer( A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == """group""" , ) __UpperCAmelCase =True else: for key, mapped_key in MAPPING.items(): __UpperCAmelCase ="""unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCAmelCase =True if "*" in mapped_key: __UpperCAmelCase =name.split(A_ )[0].split(""".""" )[-2] __UpperCAmelCase =mapped_key.replace("""*""" , A_ ) if "weight_g" in name: __UpperCAmelCase ="""weight_g""" elif "weight_v" in name: __UpperCAmelCase ="""weight_v""" elif "bias" in name: __UpperCAmelCase ="""bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCAmelCase ="""weight""" else: __UpperCAmelCase =None set_recursively(A_ , A_ , A_ , A_ , A_ , A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( A_: List[str] , A_: Union[str, Any] , A_: Any , A_: Optional[int] , A_: Union[str, Any] ) -> Tuple: """simple docstring""" __UpperCAmelCase =full_name.split("""conv_layers.""" )[-1] __UpperCAmelCase =name.split(""".""" ) __UpperCAmelCase =int(items[0] ) __UpperCAmelCase =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __UpperCAmelCase =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __UpperCAmelCase =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) __UpperCAmelCase =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __UpperCAmelCase =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A_ ) @torch.no_grad() def lowercase__ ( A_: Optional[int] , A_: str , A_: List[Any]=None , A_: List[str]=None , A_: Optional[int]=True ) -> Dict: """simple docstring""" if config_path is not None: __UpperCAmelCase =UniSpeechConfig.from_pretrained(A_ ) else: __UpperCAmelCase =UniSpeechConfig() if is_finetuned: if dict_path: __UpperCAmelCase =Dictionary.load_from_json(A_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCAmelCase =target_dict.pad_index __UpperCAmelCase =target_dict.bos_index __UpperCAmelCase =target_dict.eos_index __UpperCAmelCase =len(target_dict.symbols ) __UpperCAmelCase =os.path.join(A_ , """vocab.json""" ) if not os.path.isdir(A_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A_ ) ) return os.makedirs(A_ , exist_ok=A_ ) __UpperCAmelCase =target_dict.indices # fairseq has the <pad> and <s> switched __UpperCAmelCase =42 __UpperCAmelCase =43 with open(A_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(A_ , A_ ) __UpperCAmelCase =WavaVecaPhonemeCTCTokenizer( A_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=A_ , ) __UpperCAmelCase =True if config.feat_extract_norm == """layer""" else False __UpperCAmelCase =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , ) __UpperCAmelCase =WavaVecaProcessor(feature_extractor=A_ , tokenizer=A_ ) processor.save_pretrained(A_ ) __UpperCAmelCase =UniSpeechForCTC(A_ ) else: __UpperCAmelCase =UniSpeechForPreTraining(A_ ) if is_finetuned: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __UpperCAmelCase =model[0].eval() recursively_load_weights(A_ , A_ , A_ ) hf_unispeech.save_pretrained(A_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __A = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
68
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any: __UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Optional[Any] ) -> int: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Optional[Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # warmup pass to apply optimizations __UpperCAmelCase =pipe(**self.get_dummy_inputs() ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Dict ) -> int: __UpperCAmelCase =ort.SessionOptions() __UpperCAmelCase =False return options def _a ( self : Dict ) -> Any: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _a ( self : List[str] ) -> str: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) __UpperCAmelCase =LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
68
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): """simple docstring""" @slow def _a ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase =TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) __UpperCAmelCase ={ """input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )["""last_hidden_state"""] __UpperCAmelCase =tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. __UpperCAmelCase =tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
68
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'sequence-classification' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: if type(__SCREAMING_SNAKE_CASE ) == dict: __UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =glue_output_modes[hparams.task] __UpperCAmelCase =glue_tasks_num_labels[hparams.task] super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode ) def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: return self.model(**__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs[0] __UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""] __UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _a ( self : Tuple ) -> List[Any]: __UpperCAmelCase =self.hparams __UpperCAmelCase =processors[args.task]() __UpperCAmelCase =processor.get_labels() for mode in ["train", "dev"]: __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __UpperCAmelCase =( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) __UpperCAmelCase =convert_examples_to_features( __SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader: __UpperCAmelCase ="""dev""" if mode == """test""" else mode __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =outputs[:2] __UpperCAmelCase =logits.detach().cpu().numpy() __UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple: __UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() __UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} __UpperCAmelCase =dict(results.items() ) __UpperCAmelCase =results return ret, preds_list, out_label_list def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) parser.add_argument( """--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def lowercase__ ( ) -> str: """simple docstring""" __UpperCAmelCase =argparse.ArgumentParser() add_generic_args(A_ , os.getcwd() ) __UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() ) __UpperCAmelCase =parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __UpperCAmelCase =os.path.join( """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __UpperCAmelCase =GLUETransformer(A_ ) __UpperCAmelCase =generic_train(A_ , A_ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) ) __UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(A_ ) if __name__ == "__main__": main()
68
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig __A = logging.get_logger(__name__) __A = { "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json", # See all DPT models at https://huggingface.co/models?filter=dpt } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'dpt' def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1e-12 , __SCREAMING_SNAKE_CASE : Optional[Any]=384 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=[2, 5, 8, 11] , __SCREAMING_SNAKE_CASE : Dict="project" , __SCREAMING_SNAKE_CASE : int=[4, 2, 1, 0.5] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[96, 192, 384, 768] , __SCREAMING_SNAKE_CASE : str=256 , __SCREAMING_SNAKE_CASE : int=-1 , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : str=0.4 , __SCREAMING_SNAKE_CASE : Any=255 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 1024, 24, 24] , __SCREAMING_SNAKE_CASE : Tuple=[0, 1] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> Optional[int]: super().__init__(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =hidden_size __UpperCAmelCase =is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""" ) __UpperCAmelCase ={ """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, } __UpperCAmelCase =BitConfig(**__SCREAMING_SNAKE_CASE ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Initializing the config with a `BiT` backbone.""" ) __UpperCAmelCase =BitConfig(**__SCREAMING_SNAKE_CASE ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) __UpperCAmelCase =backbone_featmap_shape __UpperCAmelCase =neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" ) else: __UpperCAmelCase =None __UpperCAmelCase =None __UpperCAmelCase =[] __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =initializer_range __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =image_size __UpperCAmelCase =patch_size __UpperCAmelCase =num_channels __UpperCAmelCase =qkv_bias __UpperCAmelCase =backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" ) __UpperCAmelCase =readout_type __UpperCAmelCase =reassemble_factors __UpperCAmelCase =neck_hidden_sizes __UpperCAmelCase =fusion_hidden_size __UpperCAmelCase =head_in_index __UpperCAmelCase =use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __UpperCAmelCase =use_auxiliary_head __UpperCAmelCase =auxiliary_loss_weight __UpperCAmelCase =semantic_loss_ignore_index __UpperCAmelCase =semantic_classifier_dropout def _a ( self : str ) -> Any: __UpperCAmelCase =copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase =self.backbone_config.to_dict() __UpperCAmelCase =self.__class__.model_type return output
68
def lowercase__ ( A_: int , A_: int ) -> int: """simple docstring""" return 1 if input_a == input_a else 0 def lowercase__ ( ) -> None: """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
68
1
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ) -> List[str]: """simple docstring""" raise RuntimeError("""CUDA out of memory.""" ) class _A ( nn.Module ): """simple docstring""" def __init__( self : List[str] ) -> str: super().__init__() __UpperCAmelCase =nn.Linear(3 , 4 ) __UpperCAmelCase =nn.BatchNormad(4 ) __UpperCAmelCase =nn.Linear(4 , 5 ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int: return self.lineara(self.batchnorm(self.lineara(__SCREAMING_SNAKE_CASE ) ) ) class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Dict ) -> List[Any]: __UpperCAmelCase =[] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(__SCREAMING_SNAKE_CASE : List[str] ): nonlocal batch_sizes batch_sizes.append(__SCREAMING_SNAKE_CASE ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] ) def _a ( self : List[Any] ) -> Optional[Any]: __UpperCAmelCase =[] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ): nonlocal batch_sizes batch_sizes.append(__SCREAMING_SNAKE_CASE ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase =mock_training_loop_function("""hello""" ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def _a ( self : Tuple ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Any ): pass with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _a ( self : str ) -> Dict: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(__SCREAMING_SNAKE_CASE : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _a ( self : Optional[Any] ) -> List[str]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def _a ( self : Any ) -> Tuple: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Tuple ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def _a ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase =torch.cuda.memory_allocated() __UpperCAmelCase =ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =release_memory(__SCREAMING_SNAKE_CASE ) self.assertEqual(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
68
from __future__ import annotations import bisect def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] < item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =0 __UpperCAmelCase =len(A_ ) - 1 while left <= right: __UpperCAmelCase =left + (right - left) // 2 __UpperCAmelCase =sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __UpperCAmelCase =midpoint - 1 else: __UpperCAmelCase =midpoint + 1 return None def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =bisect.bisect_left(A_ , A_ ) if index != len(A_ ) and sorted_collection[index] == item: return index return None def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None: """simple docstring""" if right < left: return None __UpperCAmelCase =left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 ) else: return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ ) if __name__ == "__main__": __A = input("Enter numbers separated by comma:\n").strip() __A = sorted(int(item) for item in user_input.split(",")) __A = int(input("Enter a single number to be found in the list:\n")) __A = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
68
1
def lowercase__ ( A_: list[list[int | float]] ) -> int: """simple docstring""" __UpperCAmelCase =len(A_ ) __UpperCAmelCase =len(matrix[0] ) __UpperCAmelCase =min(A_ , A_ ) for row in range(A_ ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , A_ ): __UpperCAmelCase =matrix[col][row] / matrix[row][row] for i in range(A_ , A_ ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows __UpperCAmelCase =True for i in range(row + 1 , A_ ): if matrix[i][row] != 0: __UpperCAmelCase , __UpperCAmelCase =matrix[i], matrix[row] __UpperCAmelCase =False break if reduce: rank -= 1 for i in range(A_ ): __UpperCAmelCase =matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
68
from typing import List from .keymap import KEYMAP, get_character def lowercase__ ( A_: str ) -> str: """simple docstring""" def decorator(A_: int ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += [key] setattr(A_ , """handle_key""" , A_ ) return func return decorator def lowercase__ ( *A_: List[str] ) -> Optional[int]: """simple docstring""" def decorator(A_: Tuple ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += keys setattr(A_ , """handle_key""" , A_ ) return func return decorator class _A ( UpperCamelCase ): """simple docstring""" def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int: __UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ): setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} ) setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] ) for key in handled_keys: __UpperCAmelCase =value return new_cls @staticmethod def _a ( cls : Dict ) -> List[Any]: __UpperCAmelCase =get_character() if char != KEYMAP["undefined"]: __UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE ) if handler: __UpperCAmelCase =char return handler(cls ) else: return None def lowercase__ ( cls: str ) -> int: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
68
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } __A = { "distilbert-base-uncased": 5_12, "distilbert-base-uncased-distilled-squad": 5_12, "distilbert-base-cased": 5_12, "distilbert-base-cased-distilled-squad": 5_12, "distilbert-base-german-cased": 5_12, "distilbert-base-multilingual-cased": 5_12, } __A = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : List[str] = VOCAB_FILES_NAMES lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : str = PRETRAINED_INIT_CONFIGURATION lowerCamelCase : Optional[int] = ['input_ids', 'attention_mask'] lowerCamelCase : Optional[int] = DistilBertTokenizer def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]="[UNK]" , __SCREAMING_SNAKE_CASE : Optional[Any]="[SEP]" , __SCREAMING_SNAKE_CASE : Any="[PAD]" , __SCREAMING_SNAKE_CASE : Tuple="[CLS]" , __SCREAMING_SNAKE_CASE : List[Any]="[MASK]" , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Any , ) -> str: super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get("""strip_accents""" , __SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) ) __UpperCAmelCase =do_lower_case __UpperCAmelCase =strip_accents __UpperCAmelCase =tokenize_chinese_chars __UpperCAmelCase =normalizer_class(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =do_lower_case def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None ) -> Optional[Any]: __UpperCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase =[self.sep_token_id] __UpperCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: __UpperCAmelCase =self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE )
68
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
68
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A = logging.get_logger(__name__) @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase =deprecated_arg[3:] __UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCamelCase : str = field( default=UpperCamelCase , metadata={'help': 'Name of TPU'} , ) lowerCamelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} ) lowerCamelCase : bool = field( default=UpperCamelCase , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase =None if self.tpu: try: if self.tpu_name: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase =None return tpu @cached_property def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self : str ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self : Dict ) -> Optional[int]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self : List[str] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[str] ) -> bool: return self.n_gpu > 0
68
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =seq_length __UpperCAmelCase =is_training __UpperCAmelCase =use_attention_mask __UpperCAmelCase =use_token_type_ids __UpperCAmelCase =use_labels __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =initializer_range __UpperCAmelCase =num_choices def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase =None if self.use_attention_mask: __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase =None if self.use_token_type_ids: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self : List[str] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase =True __UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =FlaxRobertaModelTester(self ) @slow def _a ( self : Optional[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
68
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = "▁" __A = {"vocab_file": "sentencepiece.bpe.model"} __A = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model" ), } } __A = { "facebook/nllb-200-distilled-600M": 10_24, } # fmt: off __A = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = VOCAB_FILES_NAMES lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] = ['input_ids', 'attention_mask'] lowerCamelCase : List[int] = [] lowerCamelCase : List[int] = [] def __init__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any="<s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[int]=False , **__SCREAMING_SNAKE_CASE : Any , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase =legacy_behaviour super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase ={"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase =1 __UpperCAmelCase =len(self.sp_model ) __UpperCAmelCase ={ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE ) } __UpperCAmelCase ={v: k for k, v in self.lang_code_to_id.items()} __UpperCAmelCase =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __UpperCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} __UpperCAmelCase =list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __UpperCAmelCase =src_lang if src_lang is not None else """eng_Latn""" __UpperCAmelCase =self.lang_code_to_id[self._src_lang] __UpperCAmelCase =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Tuple ) -> Optional[Any]: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None __UpperCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> Tuple: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _a ( self : Any ) -> int: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _a ( self : str ) -> str: return self._src_lang @src_lang.setter def _a ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> None: __UpperCAmelCase =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =[1] * len(self.prefix_tokens ) __UpperCAmelCase =[1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: __UpperCAmelCase =[self.sep_token_id] __UpperCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] , __SCREAMING_SNAKE_CASE : Optional[str] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) __UpperCAmelCase =src_lang __UpperCAmelCase =self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tgt_lang_id return inputs def _a ( self : Any ) -> List[Any]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> List[str]: return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase =self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> int: __UpperCAmelCase ="""""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def _a ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str = "eng_Latn" , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "fra_Latn" , **__SCREAMING_SNAKE_CASE : Dict , ) -> BatchEncoding: __UpperCAmelCase =src_lang __UpperCAmelCase =tgt_lang return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _a ( self : Any ) -> Any: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> None: __UpperCAmelCase =self.lang_code_to_id[src_lang] if self.legacy_behaviour: __UpperCAmelCase =[] __UpperCAmelCase =[self.eos_token_id, self.cur_lang_code] else: __UpperCAmelCase =[self.cur_lang_code] __UpperCAmelCase =[self.eos_token_id] def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str ) -> None: __UpperCAmelCase =self.lang_code_to_id[lang] if self.legacy_behaviour: __UpperCAmelCase =[] __UpperCAmelCase =[self.eos_token_id, self.cur_lang_code] else: __UpperCAmelCase =[self.cur_lang_code] __UpperCAmelCase =[self.eos_token_id]
68
from __future__ import annotations def lowercase__ ( A_: list[list[int]] ) -> int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(A_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(A_ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
68
1
import math def lowercase__ ( ) -> None: """simple docstring""" __UpperCAmelCase =input("""Enter message: """ ) __UpperCAmelCase =int(input(F'''Enter key [2-{len(A_ ) - 1}]: ''' ) ) __UpperCAmelCase =input("""Encryption/Decryption [e/d]: """ ) if mode.lower().startswith("""e""" ): __UpperCAmelCase =encrypt_message(A_ , A_ ) elif mode.lower().startswith("""d""" ): __UpperCAmelCase =decrypt_message(A_ , A_ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(F'''Output:\n{text + "|"}''' ) def lowercase__ ( A_: int , A_: str ) -> str: """simple docstring""" __UpperCAmelCase =[""""""] * key for col in range(A_ ): __UpperCAmelCase =col while pointer < len(A_ ): cipher_text[col] += message[pointer] pointer += key return "".join(A_ ) def lowercase__ ( A_: int , A_: str ) -> str: """simple docstring""" __UpperCAmelCase =math.ceil(len(A_ ) / key ) __UpperCAmelCase =key __UpperCAmelCase =(num_cols * num_rows) - len(A_ ) __UpperCAmelCase =[""""""] * num_cols __UpperCAmelCase =0 __UpperCAmelCase =0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): __UpperCAmelCase =0 row += 1 return "".join(A_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
68
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: __UpperCAmelCase =ksize + 1 __UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(A_ ): for x in range(A_ ): # distance from center __UpperCAmelCase =x - ksize // 2 __UpperCAmelCase =y - ksize // 2 # degree to radiant __UpperCAmelCase =theta / 180 * np.pi __UpperCAmelCase =np.cos(_theta ) __UpperCAmelCase =np.sin(_theta ) # get kernel x __UpperCAmelCase =cos_theta * px + sin_theta * py # get kernel y __UpperCAmelCase =-sin_theta * px + cos_theta * py # fill kernel __UpperCAmelCase =np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image __A = imread("../image_data/lena.jpg") # turn image in gray scale value __A = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges __A = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: __A = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) __A = out / out.max() * 2_55 __A = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
68
1
from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n" def lowercase__ ( A_: Tuple , A_: List[str] , A_: str=8 ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 __UpperCAmelCase =w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : MultilingualCLIP , __SCREAMING_SNAKE_CASE : XLMRobertaTokenizer , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : Union[DDIMScheduler, DDPMScheduler] , __SCREAMING_SNAKE_CASE : VQModel , ) -> Optional[Any]: super().__init__() self.register_modules( text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , movq=__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =2 ** (len(self.movq.config.block_out_channels ) - 1) def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: if latents is None: __UpperCAmelCase =randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __UpperCAmelCase =latents.to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =latents * scheduler.init_noise_sigma return latents def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=None , ) -> Any: __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else 1 # get prompt text embeddings __UpperCAmelCase =self.tokenizer( __SCREAMING_SNAKE_CASE , padding="""max_length""" , truncation=__SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) __UpperCAmelCase =text_inputs.input_ids __UpperCAmelCase =self.tokenizer(__SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) __UpperCAmelCase =text_input_ids.to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =text_inputs.attention_mask.to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.text_encoder( input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =prompt_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) __UpperCAmelCase =text_encoder_hidden_states.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) __UpperCAmelCase =text_mask.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __UpperCAmelCase =42 if negative_prompt is None: __UpperCAmelCase =[""""""] * batch_size elif type(__SCREAMING_SNAKE_CASE ) is not type(__SCREAMING_SNAKE_CASE ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(__SCREAMING_SNAKE_CASE )} !=''' f''' {type(__SCREAMING_SNAKE_CASE )}.''' ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =[negative_prompt] elif batch_size != len(__SCREAMING_SNAKE_CASE ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(__SCREAMING_SNAKE_CASE )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: __UpperCAmelCase =negative_prompt __UpperCAmelCase =self.tokenizer( __SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=77 , truncation=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) __UpperCAmelCase =uncond_input.input_ids.to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =uncond_input.attention_mask.to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.text_encoder( input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __UpperCAmelCase =negative_prompt_embeds.shape[1] __UpperCAmelCase =negative_prompt_embeds.repeat(1 , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =negative_prompt_embeds.view(batch_size * num_images_per_prompt , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =uncond_text_encoder_hidden_states.shape[1] __UpperCAmelCase =uncond_text_encoder_hidden_states.repeat(1 , __SCREAMING_SNAKE_CASE , 1 ) __UpperCAmelCase =uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , __SCREAMING_SNAKE_CASE , -1 ) __UpperCAmelCase =uncond_text_mask.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __UpperCAmelCase =torch.cat([negative_prompt_embeds, prompt_embeds] ) __UpperCAmelCase =torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) __UpperCAmelCase =torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[str]=0 ) -> Dict: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __UpperCAmelCase =torch.device(f'''cuda:{gpu_id}''' ) __UpperCAmelCase =[ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> Any: if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) __UpperCAmelCase =torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=__SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __UpperCAmelCase =None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: __UpperCAmelCase , __UpperCAmelCase =cpu_offload_with_hook(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE ) if self.safety_checker is not None: __UpperCAmelCase , __UpperCAmelCase =cpu_offload_with_hook(self.safety_checker , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE ) # We'll offload the last model manually. __UpperCAmelCase =hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _a ( self : Union[str, Any] ) -> Dict: if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(__SCREAMING_SNAKE_CASE , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__SCREAMING_SNAKE_CASE ) def __call__( self : Any , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 100 , __SCREAMING_SNAKE_CASE : float = 4.0 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ) -> Optional[int]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =1 elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__SCREAMING_SNAKE_CASE )}''' ) __UpperCAmelCase =self._execution_device __UpperCAmelCase =batch_size * num_images_per_prompt __UpperCAmelCase =guidance_scale > 1.0 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._encode_prompt( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __UpperCAmelCase =image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) __UpperCAmelCase =negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) __UpperCAmelCase =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=__SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.scheduler.timesteps __UpperCAmelCase =self.unet.config.in_channels __UpperCAmelCase , __UpperCAmelCase =get_new_h_w(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.movq_scale_factor ) # create initial latent __UpperCAmelCase =self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance __UpperCAmelCase =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __UpperCAmelCase ={"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds} __UpperCAmelCase =self.unet( sample=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , added_cond_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: __UpperCAmelCase , __UpperCAmelCase =noise_pred.split(latents.shape[1] , dim=1 ) __UpperCAmelCase , __UpperCAmelCase =noise_pred.chunk(2 ) __UpperCAmelCase , __UpperCAmelCase =variance_pred.chunk(2 ) __UpperCAmelCase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __UpperCAmelCase =torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __UpperCAmelCase , __UpperCAmelCase =noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __UpperCAmelCase =self.scheduler.step( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , ).prev_sample # post-processing __UpperCAmelCase =self.movq.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __UpperCAmelCase =image * 0.5 + 0.5 __UpperCAmelCase =image.clamp(0 , 1 ) __UpperCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCAmelCase =self.numpy_to_pil(__SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
68
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =image_size __UpperCAmelCase =patch_size __UpperCAmelCase =num_channels __UpperCAmelCase =embed_dim __UpperCAmelCase =depths __UpperCAmelCase =num_heads __UpperCAmelCase =window_size __UpperCAmelCase =mlp_ratio __UpperCAmelCase =qkv_bias __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =drop_path_rate __UpperCAmelCase =hidden_act __UpperCAmelCase =use_absolute_embeddings __UpperCAmelCase =patch_norm __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =initializer_range __UpperCAmelCase =is_training __UpperCAmelCase =scope __UpperCAmelCase =use_labels __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =encoder_stride def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase =None if self.use_labels: __UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase =self.get_config() return config, pixel_values, labels def _a ( self : List[Any] ) -> Optional[Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: __UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple: __UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase =1 __UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __UpperCAmelCase =self.type_sequence_label_size __UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : List[str] ) -> Tuple: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCamelCase : Tuple = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Dict = False lowerCamelCase : Tuple = False lowerCamelCase : List[str] = False lowerCamelCase : Tuple = False def _a ( self : str ) -> str: __UpperCAmelCase =SwinvaModelTester(self ) __UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 ) def _a ( self : List[Any] ) -> Optional[int]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : str ) -> str: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def _a ( self : Tuple ) -> Tuple: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def _a ( self : Optional[Any] ) -> int: pass def _a ( self : Tuple ) -> int: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _a ( self : str ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase =[*signature.parameters.keys()] __UpperCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =True for model_class in self.all_model_classes: __UpperCAmelCase =True __UpperCAmelCase =False __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions __UpperCAmelCase =len(self.model_tester.depths ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase =True __UpperCAmelCase =config.window_size**2 __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __UpperCAmelCase =True __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase =self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase =2 self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.hidden_states __UpperCAmelCase =getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # Swinv2 has a different seq_length __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase =outputs.reshaped_hidden_states self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape __UpperCAmelCase =( reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _a ( self : str ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =3 __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Dict: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : int ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( self : Dict ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ) -> Dict: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def _a ( self : int ) -> Optional[int]: __UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) # verify the logits __UpperCAmelCase =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
68
1
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]: __UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )] if identifier is not None: __UpperCAmelCase =[file for file in files if identifier in file] if n_identifier is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for n_ in n_identifier: __UpperCAmelCase =[file for file in files if n_ not in file] else: __UpperCAmelCase =[file for file in files if n_identifier not in file] __UpperCAmelCase =ignore_files or [] ignore_files.append("""__init__.py""" ) __UpperCAmelCase =[file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __SCREAMING_SNAKE_CASE ) if only_modules: __UpperCAmelCase =file.split(""".""" )[0] try: __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _a ( self : Optional[Any] ) -> List[str]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""modeling""" __UpperCAmelCase =[ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""tokenization""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""configuration""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Tuple: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase =Path("""docs/source""" ) __UpperCAmelCase =["""favicon.ico"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
68
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __A = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : int = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None: __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase =kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) __UpperCAmelCase ="""None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token __UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase =unk_token if pad_token is None else pad_token __UpperCAmelCase =eos_token if bos_token is None else bos_token else: __UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token __UpperCAmelCase ="""<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =do_lower_case __UpperCAmelCase =remove_space __UpperCAmelCase =keep_accents __UpperCAmelCase =vocab_file __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace normalization in input texts # fmt : off __UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __UpperCAmelCase =re.compile( f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' ) def __getstate__( self : Any ) -> str: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Union[str, Any] ) -> int: return len(self.sp_model ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str: __UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization __UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE ) return text def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: return out_string def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str: __UpperCAmelCase =[] __UpperCAmelCase ="""""" __UpperCAmelCase =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __UpperCAmelCase =True __UpperCAmelCase =[] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Any ) -> Dict[str, int]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: __UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __UpperCAmelCase =( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=__SCREAMING_SNAKE_CASE )
68
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
0
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]: __UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )] if identifier is not None: __UpperCAmelCase =[file for file in files if identifier in file] if n_identifier is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for n_ in n_identifier: __UpperCAmelCase =[file for file in files if n_ not in file] else: __UpperCAmelCase =[file for file in files if n_identifier not in file] __UpperCAmelCase =ignore_files or [] ignore_files.append("""__init__.py""" ) __UpperCAmelCase =[file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __SCREAMING_SNAKE_CASE ) if only_modules: __UpperCAmelCase =file.split(""".""" )[0] try: __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _a ( self : Optional[Any] ) -> List[str]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""modeling""" __UpperCAmelCase =[ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""tokenization""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""configuration""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Tuple: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase =Path("""docs/source""" ) __UpperCAmelCase =["""favicon.ico"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
68
0
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __snake_case = 1_6 __snake_case = 3_2 def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase ) __UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(_lowercase ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase = datasets.map( _lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) __UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" __UpperCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) __UpperCamelCase = args.model_name_or_path set_seed(_lowercase ) __UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase ) # Instantiate optimizer __UpperCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __UpperCamelCase = 1 __UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , ) else: __UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCamelCase = 0 # Now we train the model __UpperCamelCase = evaluate.load('glue' , 'mrpc' ) __UpperCamelCase = 0 __UpperCamelCase = {} for epoch in range(_lowercase , _lowercase ): model.train() for step, batch in enumerate(_lowercase ): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.loss __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**_lowercase ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCamelCase, __UpperCamelCase = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_lowercase ) - 1: __UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_lowercase , references=_lowercase , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _lowercase ) __UpperCamelCase = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: __UpperCamelCase = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(_lowercase , _lowercase ) def _A ( ) -> List[str]: """simple docstring""" __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , ) parser.add_argument( '--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
1
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __A = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]: """simple docstring""" if rng is None: __UpperCAmelCase =random.Random() __UpperCAmelCase =1 for dim in shape: total_dims *= dim __UpperCAmelCase =[] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) __UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any: """simple docstring""" __UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch __UpperCAmelCase =1 return attn_mask @require_flax class _A : """simple docstring""" lowerCamelCase : Optional[Any] = None lowerCamelCase : int = () def _a ( self : str ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __UpperCAmelCase =2 __UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2 __UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length] __UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __UpperCAmelCase =input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __UpperCAmelCase =config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Union[str, Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =0 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params ) __UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences __UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : Union[str, Any] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length __UpperCAmelCase =0.8 __UpperCAmelCase =10 __UpperCAmelCase =0.3 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =2 __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : int ) -> Any: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) __UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) __UpperCAmelCase ="""Hello world""" __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ): model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ): __UpperCAmelCase ={"""foo""": """bar"""} model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
68
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
2
from __future__ import annotations from collections.abc import Iterator class _A : """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None: __UpperCAmelCase =value __UpperCAmelCase =None __UpperCAmelCase =None class _A : """simple docstring""" def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None: __UpperCAmelCase =tree def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : int ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
68
0
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class SCREAMING_SNAKE_CASE__ ( snake_case_): def UpperCAmelCase_ ( self )-> Tuple: '''simple docstring''' UpperCamelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A_ , 'width_multiplier' ) ) class SCREAMING_SNAKE_CASE__ : def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_="swish" , A_=3 , A_=32 , A_=0.1 , A_=0.02 , A_=True , A_=True , A_=10 , A_=None , A_=0.25 , A_=0.0 , A_=0.0 , )-> Dict: '''simple docstring''' UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = make_divisible(512 * width_multiplier , divisor=8 ) UpperCamelCase = hidden_act UpperCamelCase = conv_kernel_size UpperCamelCase = output_stride UpperCamelCase = classifier_dropout_prob UpperCamelCase = use_labels UpperCamelCase = is_training UpperCamelCase = num_labels UpperCamelCase = initializer_range UpperCamelCase = scope UpperCamelCase = width_multiplier UpperCamelCase = ffn_dropout UpperCamelCase = attn_dropout def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCAmelCase_ ( self )-> str: '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[Any]: '''simple docstring''' UpperCamelCase = MobileViTVaModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]: '''simple docstring''' UpperCamelCase = self.num_labels UpperCamelCase = MobileViTVaForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Dict: '''simple docstring''' UpperCamelCase = self.num_labels UpperCamelCase = MobileViTVaForSemanticSegmentation(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) UpperCamelCase = model(A_ , labels=A_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCAmelCase_ ( self )-> str: '''simple docstring''' UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase): lowerCAmelCase_ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowerCAmelCase_ = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase = MobileViTVaModelTester(self ) UpperCamelCase = MobileViTVaConfigTester(self , config_class=A_ , has_text_modality=A_ ) def UpperCAmelCase_ ( self )-> str: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def UpperCAmelCase_ ( self )-> str: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' pass def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(A_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , A_ ) def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' def check_hidden_states_output(A_ , A_ , A_ ): UpperCamelCase = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = 5 self.assertEqual(len(A_ ) , A_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase = 2 for i in range(len(A_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(A_ , A_ , A_ ) def UpperCAmelCase_ ( self )-> Optional[Any]: '''simple docstring''' UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def UpperCAmelCase_ ( self )-> Tuple: '''simple docstring''' UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A_ ) @slow def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MobileViTVaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def A_( ): UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): @cached_property def UpperCAmelCase_ ( self )-> Union[str, Any]: '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' UpperCamelCase = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( A_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**A_ ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) ) @slow def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase = model.to(A_ ) UpperCamelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**A_ ) UpperCamelCase = outputs.logits # verify the logits UpperCamelCase = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , A_ ) UpperCamelCase = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=A_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) ) @slow def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase = model.to(A_ ) UpperCamelCase = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**A_ ) UpperCamelCase = outputs.logits.detach().cpu() UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(50, 60)] ) UpperCamelCase = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , A_ ) UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ ) UpperCamelCase = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , A_ )
3
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def lowercase__ ( A_: Union[str, Any] ) -> List[Any]: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(F'''role {role_name} already exists. Using existing one''' ) def lowercase__ ( A_: Dict ) -> Any: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =_ask_options( """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , ) __UpperCAmelCase =None if credentials_configuration == 0: __UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" ) __UpperCAmelCase =aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __UpperCAmelCase =_ask_field("""AWS Access Key ID: """ ) __UpperCAmelCase =aws_access_key_id __UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ ) __UpperCAmelCase =aws_secret_access_key __UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" ) __UpperCAmelCase =aws_region __UpperCAmelCase =_ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , ) if role_management == 0: __UpperCAmelCase =_ask_field("""Enter your IAM role name: """ ) else: __UpperCAmelCase ="""accelerate_sagemaker_execution_role""" print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __UpperCAmelCase =_ask_field( """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_custom_docker_image: __UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() ) __UpperCAmelCase =_ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_inputs_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_metrics_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_options( """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , ) __UpperCAmelCase ={} __UpperCAmelCase =_ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_dynamo: __UpperCAmelCase ="""dynamo_""" __UpperCAmelCase =_ask_options( """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) __UpperCAmelCase =_ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_custom_options: __UpperCAmelCase =_ask_options( """Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , ) __UpperCAmelCase =_ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =_ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase ="""Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __UpperCAmelCase =_ask_options( A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" ) __UpperCAmelCase =1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __UpperCAmelCase =_ask_field( """How many machines do you want use? [1]: """ , A_ , default=1 , ) __UpperCAmelCase =_ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
68
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float ): if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float ): if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
4
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = 'ctrl' lowerCamelCase : Any = ['past_key_values'] lowerCamelCase : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any: __UpperCAmelCase =vocab_size __UpperCAmelCase =n_positions __UpperCAmelCase =n_embd __UpperCAmelCase =n_layer __UpperCAmelCase =n_head __UpperCAmelCase =dff __UpperCAmelCase =resid_pdrop __UpperCAmelCase =embd_pdrop __UpperCAmelCase =layer_norm_epsilon __UpperCAmelCase =initializer_range __UpperCAmelCase =use_cache super().__init__(**__SCREAMING_SNAKE_CASE )
68
0
'''simple docstring''' def A (__lowerCamelCase :float ): return 10 - x * x def A (__lowerCamelCase :float , __lowerCamelCase :float ): # Bolzano theory in order to find if there is a root between a and b if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) >= 0: raise ValueError("""Wrong space!""" ) _lowerCAmelCase = a while (b - a) >= 0.01: # Find middle point _lowerCAmelCase = (a + b) / 2 # Check if middle point is root if equation(__lowerCamelCase ) == 0.0: break # Decide the side to repeat the steps if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) < 0: _lowerCAmelCase = c else: _lowerCAmelCase = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
5
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __UpperCAmelCase =k.replace(A_ , A_ ) if k.startswith("""encoder""" ): __UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" ) __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" ) return k def lowercase__ ( A_: Tuple ) -> str: """simple docstring""" __UpperCAmelCase =[ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __UpperCAmelCase =sd.pop(A_ ) __UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __UpperCAmelCase =v __A = ["START"] @torch.no_grad() def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =torch.load(A_ , map_location="""cpu""" ) __UpperCAmelCase =model["""model"""] __UpperCAmelCase =BlenderbotConfig.from_json_file(A_ ) __UpperCAmelCase =BlenderbotForConditionalGeneration(A_ ) __UpperCAmelCase =m.model.state_dict().keys() __UpperCAmelCase =[] __UpperCAmelCase ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue __UpperCAmelCase =rename_state_dict_key(A_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __UpperCAmelCase =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(A_ ) m.model.load_state_dict(A_ , strict=A_ ) m.half() m.save_pretrained(A_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __A = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
68
0
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class UpperCamelCase_ : def __init__( self :str , __A :str , __A :List[Any] , __A :bool = True , __A :bool = False ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = scheduler SCREAMING_SNAKE_CASE__ = optimizers if isinstance(__A , (list, tuple) ) else [optimizers] SCREAMING_SNAKE_CASE__ = split_batches SCREAMING_SNAKE_CASE__ = step_with_optimizer SCREAMING_SNAKE_CASE__ = GradientState() def _snake_case ( self :List[Any] , *__A :Dict , **__A :Any ) -> Optional[int]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__A , **__A ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__A , **__A ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step SCREAMING_SNAKE_CASE__ = AcceleratorState().num_processes for _ in range(__A ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , """total_steps""" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__A , **__A ) else: self.scheduler.step(*__A , **__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" return self.scheduler.get_last_lr() def _snake_case ( self :Union[str, Any] ) -> str: """simple docstring""" return self.scheduler.state_dict() def _snake_case ( self :Tuple , __A :Any ) -> Optional[int]: """simple docstring""" self.scheduler.load_state_dict(__A ) def _snake_case ( self :Optional[int] ) -> Optional[Any]: """simple docstring""" return self.scheduler.get_lr() def _snake_case ( self :Any , *__A :Optional[int] , **__A :Optional[int] ) -> List[str]: """simple docstring""" return self.scheduler.print_lr(*__A , **__A )
6
from itertools import permutations def lowercase__ ( A_: tuple ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __UpperCAmelCase =[7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase__ ( A_: int = 10 ) -> int: """simple docstring""" return sum( int("""""".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
68
0
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = TextToVideoSDPipeline UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS UpperCAmelCase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. UpperCAmelCase : Any = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def lowerCAmelCase_ ( self : Optional[Any] ): torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) _A = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , ) _A = CLIPTextModel(_UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int=0 ): if str(_UpperCAmelCase ).startswith('mps' ): _A = torch.manual_seed(_UpperCAmelCase ) else: _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def lowerCAmelCase_ ( self : Optional[Any] ): _A = 'cpu' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = TextToVideoSDPipeline(**_UpperCAmelCase ) _A = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = self.get_dummy_inputs(_UpperCAmelCase ) _A = 'np' _A = sd_pipe(**_UpperCAmelCase ).frames _A = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _A = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self : Any ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCAmelCase_ ( self : int ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def lowerCAmelCase_ ( self : Optional[Any] ): pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def lowerCAmelCase_ ( self : Optional[int] ): pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def lowerCAmelCase_ ( self : Optional[int] ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): return super().test_progress_bar() @slow @skip_mps class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) _A = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) _A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _A = pipe.to('cuda' ) _A = 'Spiderman is surfing' _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=25 , output_type='pt' ).frames _A = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCAmelCase_ ( self : Dict ): _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) _A = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) _A = pipe.to('cuda' ) _A = 'Spiderman is surfing' _A = torch.Generator(device='cpu' ).manual_seed(0 ) _A = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='pt' ).frames _A = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
7
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar("T") def lowercase__ ( A_: int ) -> int: """simple docstring""" return (position - 1) // 2 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 1 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 2 class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[str] ) -> None: __UpperCAmelCase =[] __UpperCAmelCase ={} __UpperCAmelCase =0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Dict ) -> str: return str(self.heap ) def _a ( self : Optional[int] ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __UpperCAmelCase =self.elements self.elements += 1 self._bubble_up(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __UpperCAmelCase , __UpperCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __UpperCAmelCase , __UpperCAmelCase =self.heap[0] self._bubble_down(__SCREAMING_SNAKE_CASE ) return elem def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Update the weight of the given key __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase =(elem, weight) if position > 0: __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __UpperCAmelCase =self.position_map[elem] if curr_pos == 0: return None __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_up(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements and child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) else: return None if child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: # Swap the nodes at the given positions __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase , __UpperCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) __UpperCAmelCase =nodea_pos __UpperCAmelCase =nodea_pos class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[Any] ) -> None: __UpperCAmelCase ={} __UpperCAmelCase =0 def __repr__( self : Tuple ) -> str: return str(self.connections ) def __len__( self : str ) -> int: return self.nodes def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __UpperCAmelCase ={} self.nodes += 1 def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__SCREAMING_SNAKE_CASE ) self.add_node(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =weight __UpperCAmelCase =weight def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: """simple docstring""" __UpperCAmelCase ={node: maxsize for node in graph.connections} __UpperCAmelCase ={node: None for node in graph.connections} __UpperCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(A_ , A_ ) if priority_queue.is_empty(): return dist, parent # initialization __UpperCAmelCase =priority_queue.extract_min() __UpperCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): __UpperCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node return dist, parent
68
0
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __snake_case : int , __snake_case : int ) -> list[list[int]]: __A : list[list[int]] = [] create_all_state(1 , __snake_case , __snake_case , [] , __snake_case ) return result def _lowerCAmelCase ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] , __snake_case : list[list[int]] , ) -> None: if level == 0: total_list.append(current_list[:] ) return for i in range(__snake_case , total_number - level + 2 ): current_list.append(__snake_case ) create_all_state(i + 1 , __snake_case , level - 1 , __snake_case , __snake_case ) current_list.pop() def _lowerCAmelCase ( __snake_case : list[list[int]] ) -> None: for i in total_list: print(*__snake_case ) if __name__ == "__main__": lowercase__ : List[str] = 4 lowercase__ : List[str] = 2 lowercase__ : int = generate_all_combinations(n, k) print_all_state(total_list)
8
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A = logging.get_logger(__name__) @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase =deprecated_arg[3:] __UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCamelCase : str = field( default=UpperCamelCase , metadata={'help': 'Name of TPU'} , ) lowerCamelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} ) lowerCamelCase : bool = field( default=UpperCamelCase , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase =None if self.tpu: try: if self.tpu_name: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase =None return tpu @cached_property def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self : str ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self : Dict ) -> Optional[int]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self : List[str] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[str] ) -> bool: return self.n_gpu > 0
68
0
from __future__ import annotations from fractions import Fraction def A ( __UpperCamelCase , __UpperCamelCase ) -> bool: return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def A ( __UpperCamelCase ) -> list[str]: A__ = [] A__ = 11 A__ = int('1' + '0' * digit_len ) for num in range(__UpperCamelCase , __UpperCamelCase ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ): solutions.append(f'''{num}/{den}''' ) den += 1 num += 1 A__ = 10 return solutions def A ( __UpperCamelCase = 2 ) -> int: A__ = 1.0 for fraction in fraction_list(__UpperCamelCase ): A__ = Fraction(__UpperCamelCase ) result *= frac.denominator / frac.numerator return int(__UpperCamelCase ) if __name__ == "__main__": print(solution())
9
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Dict: torch.manual_seed(0 ) __UpperCAmelCase =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _a ( self : int ) -> Union[str, Any]: __UpperCAmelCase =self.dummy_uncond_unet __UpperCAmelCase =ScoreSdeVeScheduler() __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[ 0 ] __UpperCAmelCase =image[0, -3:, -3:, -1] __UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ) -> int: __UpperCAmelCase ="""google/ncsnpp-church-256""" __UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
68
0
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ): _UpperCamelCase = HfArgumentParser(__snake_case ) _UpperCamelCase = parser.parse_args_into_dataclasses()[0] _UpperCamelCase = TensorFlowBenchmark(args=__snake_case ) try: _UpperCamelCase = parser.parse_args_into_dataclasses()[0] except ValueError as e: _UpperCamelCase = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' _UpperCamelCase = ''' '''.join(str(__snake_case ).split(''' ''' )[:-1] ) _UpperCamelCase = '''''' _UpperCamelCase = eval(str(__snake_case ).split(''' ''' )[-1] ) _UpperCamelCase = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__snake_case ) if len(__snake_case ) > 0: _UpperCamelCase = full_error_msg + begin_error_msg + str(__snake_case ) raise ValueError(__snake_case ) benchmark.run() if __name__ == "__main__": main()
10
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase =self.model.config else: __UpperCAmelCase =config __UpperCAmelCase =data_args __UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase =label_smoothed_nll_loss def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any: if self.optimizer is None: __UpperCAmelCase =["""bias""", """LayerNorm.weight"""] __UpperCAmelCase =[ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase =Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase =Adafactor __UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase =AdamW __UpperCAmelCase ={ """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase =self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase =OSS( params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: __UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase =schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase =schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) return scheduler def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 ) __UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: __UpperCAmelCase =inputs.pop("""labels""" ) __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return loss def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: __UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase =self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) __UpperCAmelCase =inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: # If PAD token is not defined at least EOS token has to be defined __UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase =pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase =tensor return padded_tensor
68
0
'''simple docstring''' from math import loga def lowerCAmelCase (__A): """simple docstring""" if a < 0: raise ValueError('''Input value must be a positive integer''') elif isinstance(__A , __A): raise TypeError('''Input value must be a \'int\' type''') return 0 if (a == 0) else int(loga(a & -a)) if __name__ == "__main__": import doctest doctest.testmod()
11
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any: __UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Optional[Any] ) -> int: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Optional[Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # warmup pass to apply optimizations __UpperCAmelCase =pipe(**self.get_dummy_inputs() ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Dict ) -> int: __UpperCAmelCase =ort.SessionOptions() __UpperCAmelCase =False return options def _a ( self : Dict ) -> Any: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _a ( self : List[str] ) -> str: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) __UpperCAmelCase =LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
68
0
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' super().tearDown() gc.collect() def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowercase__ : Optional[int] = """A painting of a squirrel eating a burger""" lowercase__ : Optional[Any] = jax.device_count() lowercase__ : Optional[int] = num_samples * [prompt] lowercase__ : Tuple = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = shard(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = jax.random.PRNGKey(0) lowercase__ : Tuple = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) lowercase__ : Optional[int] = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE_)[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) lowercase__ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) lowercase__ : Optional[Any] = images[0, 2_53:2_56, 2_53:2_56, -1] lowercase__ : Dict = jnp.asarray(jax.device_get(image_slice.flatten())) lowercase__ : Dict = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2]) print(f'output_slice: {output_slice}') assert jnp.abs(output_slice - expected_slice).max() < 1E-2 def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = """stabilityai/stable-diffusion-2""" lowercase__ , lowercase__ : List[str] = FlaxDPMSolverMultistepScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""") lowercase__ , lowercase__ : int = FlaxStableDiffusionPipeline.from_pretrained( SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , revision="""bf16""" , dtype=jnp.bfloataa , ) lowercase__ : str = scheduler_params lowercase__ : List[Any] = """A painting of a squirrel eating a burger""" lowercase__ : List[Any] = jax.device_count() lowercase__ : str = num_samples * [prompt] lowercase__ : Optional[Any] = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = replicate(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = shard(SCREAMING_SNAKE_CASE_) lowercase__ : str = jax.random.PRNGKey(0) lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) lowercase__ : Tuple = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE_)[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) lowercase__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1] lowercase__ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten())) lowercase__ : Union[str, Any] = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7]) print(f'output_slice: {output_slice}') assert jnp.abs(output_slice - expected_slice).max() < 1E-2
12
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'sequence-classification' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: if type(__SCREAMING_SNAKE_CASE ) == dict: __UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =glue_output_modes[hparams.task] __UpperCAmelCase =glue_tasks_num_labels[hparams.task] super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode ) def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: return self.model(**__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs[0] __UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""] __UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _a ( self : Tuple ) -> List[Any]: __UpperCAmelCase =self.hparams __UpperCAmelCase =processors[args.task]() __UpperCAmelCase =processor.get_labels() for mode in ["train", "dev"]: __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __UpperCAmelCase =( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) __UpperCAmelCase =convert_examples_to_features( __SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader: __UpperCAmelCase ="""dev""" if mode == """test""" else mode __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =outputs[:2] __UpperCAmelCase =logits.detach().cpu().numpy() __UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple: __UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() __UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} __UpperCAmelCase =dict(results.items() ) __UpperCAmelCase =results return ret, preds_list, out_label_list def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) parser.add_argument( """--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def lowercase__ ( ) -> str: """simple docstring""" __UpperCAmelCase =argparse.ArgumentParser() add_generic_args(A_ , os.getcwd() ) __UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() ) __UpperCAmelCase =parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __UpperCAmelCase =os.path.join( """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __UpperCAmelCase =GLUETransformer(A_ ) __UpperCAmelCase =generic_train(A_ , A_ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) ) __UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(A_ ) if __name__ == "__main__": main()
68
0
'''simple docstring''' import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline A__ : Optional[Any] = { """n_samples""": 64, """horizon""": 32, """num_inference_steps""": 20, """n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network """scale_grad_by_std""": True, """scale""": 0.1, """eta""": 0.0, """t_grad_cutoff""": 2, """device""": """cpu""", } if __name__ == "__main__": A__ : Optional[Any] = """hopper-medium-v2""" A__ : str = gym.make(env_name) A__ : Union[str, Any] = ValueGuidedRLPipeline.from_pretrained( """bglick13/hopper-medium-v2-value-function-hor32""", env=env, ) env.seed(0) A__ : int = env.reset() A__ : Tuple = 0 A__ : int = 0 A__ : Tuple = 1000 A__ : str = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy A__ : Any = pipeline(obs, planning_horizon=32) # execute action in environment A__ , A__ , A__ , A__ : Dict = env.step(denorm_actions) A__ : Optional[int] = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' f''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) A__ : List[Any] = next_observation except KeyboardInterrupt: pass print(f'''Total reward: {total_reward}''')
13
def lowercase__ ( A_: int , A_: int ) -> int: """simple docstring""" return 1 if input_a == input_a else 0 def lowercase__ ( ) -> None: """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
68
0
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array: """simple docstring""" _a : int = F"""{sampling_rate}""" _a : str = '''1''' _a : Optional[int] = '''f32le''' _a : Optional[Any] = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process: _a : Any = ffmpeg_process.communicate(__a ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error _a : Optional[Any] = output_stream[0] _a : Optional[int] = np.frombuffer(__a ,np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str: """simple docstring""" _a : Dict = F"""{sampling_rate}""" _a : Optional[Any] = '''1''' if format_for_conversion == "s16le": _a : Dict = 2 elif format_for_conversion == "f32le": _a : Optional[Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) _a : Dict = platform.system() if system == "Linux": _a : Dict = '''alsa''' _a : Union[str, Any] = '''default''' elif system == "Darwin": _a : Union[str, Any] = '''avfoundation''' _a : List[str] = ''':0''' elif system == "Windows": _a : Optional[int] = '''dshow''' _a : str = '''default''' _a : Tuple = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] _a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _a : str = _ffmpeg_stream(__a ,__a ) for item in iterator: yield item def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]: """simple docstring""" if stream_chunk_s is not None: _a : Tuple = stream_chunk_s else: _a : Tuple = chunk_length_s _a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a ) if format_for_conversion == "s16le": _a : Any = np.intaa _a : Optional[int] = 2 elif format_for_conversion == "f32le": _a : Dict = np.floataa _a : List[Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) if stride_length_s is None: _a : List[Any] = chunk_length_s / 6 _a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(__a ,(int, float) ): _a : Optional[Any] = [stride_length_s, stride_length_s] _a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _a : Optional[Any] = datetime.datetime.now() _a : Tuple = datetime.timedelta(seconds=__a ) for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ): # Put everything back in numpy scale _a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a ) _a : Dict = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) _a : str = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]: """simple docstring""" _a : Any = b'''''' _a , _a : List[str] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) _a : List[str] = 0 for raw in iterator: acc += raw if stream and len(__a ) < chunk_len: _a : Dict = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(__a ) >= chunk_len: # We are flushing the accumulator _a : List[str] = (_stride_left, stride_right) _a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: _a : List[Any] = False yield item _a : Optional[Any] = stride_left _a : Optional[Any] = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(__a ) > stride_left: _a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: _a : Dict = False yield item def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple: """simple docstring""" _a : Dict = 2**24 # 16Mo try: with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process: while True: _a : int = ffmpeg_process.stdout.read(__a ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
14
from __future__ import annotations import bisect def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] < item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =0 __UpperCAmelCase =len(A_ ) - 1 while left <= right: __UpperCAmelCase =left + (right - left) // 2 __UpperCAmelCase =sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __UpperCAmelCase =midpoint - 1 else: __UpperCAmelCase =midpoint + 1 return None def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =bisect.bisect_left(A_ , A_ ) if index != len(A_ ) and sorted_collection[index] == item: return index return None def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None: """simple docstring""" if right < left: return None __UpperCAmelCase =left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 ) else: return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ ) if __name__ == "__main__": __A = input("Enter numbers separated by comma:\n").strip() __A = sorted(int(item) for item in user_input.split(",")) __A = int(input("Enter a single number to be found in the list:\n")) __A = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
68
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration A : Optional[Any] = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] A : Union[str, Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] A : List[Any] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) A : str = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) A : str = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ) -> int: """simple docstring""" for tf_name, hf_name in patterns: lowercase__ = k.replace(__magic_name__ , __magic_name__ ) return k def UpperCamelCase ( __magic_name__ : dict , __magic_name__ : dict ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" lowercase__ = BigBirdPegasusConfig(**__magic_name__ ) lowercase__ = BigBirdPegasusForConditionalGeneration(__magic_name__ ) lowercase__ = torch_model.state_dict() lowercase__ = {} # separating decoder weights lowercase__ = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} lowercase__ = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): lowercase__ = [k.endswith(__magic_name__ ) for ending in KEYS_TO_IGNORE] if any(__magic_name__ ): continue lowercase__ = DECODER_PATTERNS lowercase__ = rename_state_dict_key(__magic_name__ , __magic_name__ ) if new_k not in state_dict: raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowercase__ = v.T lowercase__ = torch.from_numpy(__magic_name__ ) assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): lowercase__ = [k.endswith(__magic_name__ ) for ending in KEYS_TO_IGNORE] if any(__magic_name__ ): continue lowercase__ = REMAINING_PATTERNS lowercase__ = rename_state_dict_key(__magic_name__ , __magic_name__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowercase__ = v.T lowercase__ = torch.from_numpy(__magic_name__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' lowercase__ = mapping["""model.embed_positions.weight"""] lowercase__ = mapping.pop("""model.embed_positions.weight""" ) lowercase__ , lowercase__ = torch_model.load_state_dict(__magic_name__ , strict=__magic_name__ ) lowercase__ = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def UpperCamelCase ( __magic_name__ : Tuple ) -> Dict: """simple docstring""" lowercase__ = tf.train.list_variables(__magic_name__ ) lowercase__ = {} lowercase__ = ["""global_step"""] for name, shape in tqdm(__magic_name__ , desc="""converting tf checkpoint to dict""" ): lowercase__ = any(pat in name for pat in ignore_name ) if skip_key: continue lowercase__ = tf.train.load_variable(__magic_name__ , __magic_name__ ) lowercase__ = array return tf_weights def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : dict ) -> int: """simple docstring""" lowercase__ = get_tf_weights_as_numpy(__magic_name__ ) lowercase__ = convert_bigbird_pegasus(__magic_name__ , __magic_name__ ) torch_model.save_pretrained(__magic_name__ ) if __name__ == "__main__": A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') A : Any = parser.parse_args() A : Optional[Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
15
from typing import List from .keymap import KEYMAP, get_character def lowercase__ ( A_: str ) -> str: """simple docstring""" def decorator(A_: int ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += [key] setattr(A_ , """handle_key""" , A_ ) return func return decorator def lowercase__ ( *A_: List[str] ) -> Optional[int]: """simple docstring""" def decorator(A_: Tuple ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += keys setattr(A_ , """handle_key""" , A_ ) return func return decorator class _A ( UpperCamelCase ): """simple docstring""" def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int: __UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ): setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} ) setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] ) for key in handled_keys: __UpperCAmelCase =value return new_cls @staticmethod def _a ( cls : Dict ) -> List[Any]: __UpperCAmelCase =get_character() if char != KEYMAP["undefined"]: __UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE ) if handler: __UpperCAmelCase =char return handler(cls ) else: return None def lowercase__ ( cls: str ) -> int: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
68
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = LDMTextToImagePipeline lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase__ = False def _snake_case ( self : List[Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE = CLIPTextModel(__lowerCamelCase ) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) SCREAMING_SNAKE_CASE = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=0 ): if str(__lowerCamelCase ).startswith("mps" ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = LDMTextToImagePipeline(**__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) SCREAMING_SNAKE_CASE = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=torch.floataa , __lowerCamelCase : List[Any]=0 ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 32, 32) ) SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ).images SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] ) SCREAMING_SNAKE_CASE = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str]=torch.floataa , __lowerCamelCase : List[str]=0 ): SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase ) SCREAMING_SNAKE_CASE = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 32, 32) ) SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.get_inputs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase ).images[0] SCREAMING_SNAKE_CASE = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) SCREAMING_SNAKE_CASE = np.abs(expected_image - image ).max() assert max_diff < 1e-3
16
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
68
0
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : int , __A : int , __A : Optional[Any]=13 , __A : Any=7 , __A : Any=True , __A : List[Any]=True , __A : str=True , __A : int=True , __A : Optional[Any]=99 , __A : Dict=32 , __A : int=5 , __A : Optional[int]=4 , __A : Dict=37 , __A : List[str]="gelu" , __A : str=0.1 , __A : int=0.1 , __A : Optional[int]=512 , __A : Optional[int]=16 , __A : List[str]=2 , __A : List[Any]=0.0_2 , __A : int=4 , ): __A : Tuple = parent __A : Union[str, Any] = batch_size __A : int = seq_length __A : Any = is_training __A : Optional[int] = use_attention_mask __A : List[Any] = use_token_type_ids __A : Optional[int] = use_labels __A : Optional[Any] = vocab_size __A : Tuple = hidden_size __A : int = num_hidden_layers __A : List[Any] = num_attention_heads __A : Optional[Any] = intermediate_size __A : Optional[int] = hidden_act __A : Any = hidden_dropout_prob __A : List[Any] = attention_probs_dropout_prob __A : Any = max_position_embeddings __A : Tuple = type_vocab_size __A : List[str] = type_sequence_label_size __A : List[str] = initializer_range __A : List[Any] = num_choices def lowerCAmelCase_ ( self : List[Any] ): __A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A : Optional[Any] = None if self.use_attention_mask: __A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __A : Any = None if self.use_token_type_ids: __A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __A : Dict = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self : int ): __A : Optional[int] = self.prepare_config_and_inputs() __A , __A , __A , __A : Tuple = config_and_inputs __A : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self : Tuple ): __A : Tuple = self.prepare_config_and_inputs() __A , __A , __A , __A : List[Any] = config_and_inputs __A : Optional[int] = True __A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __A : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class lowerCamelCase_ ( _lowercase , unittest.TestCase ): _lowercase : str = True _lowercase : Any = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self : List[str] ): __A : List[Any] = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowerCAmelCase_ ( self : Any ): for model_class_name in self.all_model_classes: __A : List[Any] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__A ) __A : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A ) @require_flax class lowerCamelCase_ ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : int ): __A : Dict = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__A ) __A : str = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa ) __A : List[Any] = model(__A )[0] __A : str = [1, 11, 5_0265] self.assertEqual(list(output.shape ) , __A ) # compare the actual values for a slice. __A : int = np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __A , atol=1e-4 ) ) @slow def lowerCAmelCase_ ( self : Any ): __A : Union[str, Any] = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__A ) __A : Dict = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa ) __A : Optional[int] = model(__A )[0] # compare the actual values for a slice. __A : Optional[int] = np.array( [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __A , atol=1e-4 ) )
17
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =seq_length __UpperCAmelCase =is_training __UpperCAmelCase =use_attention_mask __UpperCAmelCase =use_token_type_ids __UpperCAmelCase =use_labels __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =initializer_range __UpperCAmelCase =num_choices def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase =None if self.use_attention_mask: __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase =None if self.use_token_type_ids: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self : List[str] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase =True __UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =FlaxRobertaModelTester(self ) @slow def _a ( self : Optional[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
68
0
'''simple docstring''' from __future__ import annotations def __a(SCREAMING_SNAKE_CASE_ : list ): '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
18
from __future__ import annotations def lowercase__ ( A_: list[list[int]] ) -> int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(A_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(A_ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
68
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
19
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: __UpperCAmelCase =ksize + 1 __UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(A_ ): for x in range(A_ ): # distance from center __UpperCAmelCase =x - ksize // 2 __UpperCAmelCase =y - ksize // 2 # degree to radiant __UpperCAmelCase =theta / 180 * np.pi __UpperCAmelCase =np.cos(_theta ) __UpperCAmelCase =np.sin(_theta ) # get kernel x __UpperCAmelCase =cos_theta * px + sin_theta * py # get kernel y __UpperCAmelCase =-sin_theta * px + cos_theta * py # fill kernel __UpperCAmelCase =np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image __A = imread("../image_data/lena.jpg") # turn image in gray scale value __A = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges __A = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: __A = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) __A = out / out.max() * 2_55 __A = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
68
0
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase_ : def __init__( self , lowercase_ , lowercase_=2 , lowercase_=3 , lowercase_=4 , lowercase_=2 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=36 , lowercase_=3 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=6 , lowercase_=6 , lowercase_=3 , lowercase_=4 , lowercase_=None , lowercase_=1000 , ) -> str: a__ =parent a__ =batch_size a__ =num_channels a__ =image_size a__ =patch_size a__ =text_seq_length a__ =is_training a__ =use_input_mask a__ =use_token_type_ids a__ =use_labels a__ =vocab_size a__ =hidden_size a__ =num_hidden_layers a__ =num_attention_heads a__ =intermediate_size a__ =hidden_act a__ =hidden_dropout_prob a__ =attention_probs_dropout_prob a__ =max_position_embeddings a__ =type_vocab_size a__ =type_sequence_label_size a__ =initializer_range a__ =coordinate_size a__ =shape_size a__ =num_labels a__ =num_choices a__ =scope a__ =range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a__ =text_seq_length a__ =(image_size // patch_size) ** 2 + 1 a__ =self.text_seq_length + self.image_seq_length def __UpperCamelCase ( self) -> Optional[Any]: a__ =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size) a__ =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: a__ =bbox[i, j, 3] a__ =bbox[i, j, 1] a__ =t if bbox[i, j, 2] < bbox[i, j, 0]: a__ =bbox[i, j, 2] a__ =bbox[i, j, 0] a__ =t a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ =None if self.use_input_mask: a__ =random_attention_mask([self.batch_size, self.text_seq_length]) a__ =None if self.use_token_type_ids: a__ =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size) a__ =None a__ =None if self.use_labels: a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels) a__ =LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str: a__ =LayoutLMvaModel(config=lowercase_) model.to(lowercase_) model.eval() # text + image a__ =model(lowercase_ , pixel_values=lowercase_) a__ =model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) a__ =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_) a__ =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # text only a__ =model(lowercase_) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size)) # image only a__ =model(pixel_values=lowercase_) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size)) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]: a__ =self.num_labels a__ =LayoutLMvaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() a__ =model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str: a__ =self.num_labels a__ =LayoutLMvaForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() a__ =model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels)) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str: a__ =LayoutLMvaForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() a__ =model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def __UpperCamelCase ( self) -> Tuple: a__ =self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) =config_and_inputs a__ ={ 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ): snake_case =False snake_case =False snake_case =False snake_case =( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) snake_case =( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def __UpperCamelCase ( self) -> int: a__ =LayoutLMvaModelTester(self) a__ =ConfigTester(self , config_class=lowercase_ , hidden_size=37) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=False) -> Tuple: a__ =copy.deepcopy(lowercase_) if model_class in get_values(lowercase_): a__ ={ k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous() if isinstance(lowercase_ , torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowercase_): a__ =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_) elif model_class in get_values(lowercase_): a__ =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_) a__ =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_) elif model_class in [ *get_values(lowercase_), ]: a__ =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_) elif model_class in [ *get_values(lowercase_), ]: a__ =torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , ) return inputs_dict def __UpperCamelCase ( self) -> str: self.config_tester.run_common_tests() def __UpperCamelCase ( self) -> List[str]: a__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def __UpperCamelCase ( self) -> Optional[Any]: a__ =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a__ =type self.model_tester.create_and_check_model(*lowercase_) def __UpperCamelCase ( self) -> List[Any]: a__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_) def __UpperCamelCase ( self) -> List[str]: a__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def __UpperCamelCase ( self) -> List[str]: a__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def __UpperCamelCase ( self) -> Optional[Any]: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ =LayoutLMvaModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _lowercase( ): a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class lowercase_ (unittest.TestCase ): @cached_property def __UpperCamelCase ( self) -> List[str]: return LayoutLMvaImageProcessor(apply_ocr=lowercase_) if is_vision_available() else None @slow def __UpperCamelCase ( self) -> Optional[Any]: a__ =LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(lowercase_) a__ =self.default_image_processor a__ =prepare_img() a__ =image_processor(images=lowercase_ , return_tensors='pt').pixel_values.to(lowercase_) a__ =torch.tensor([[1, 2]]) a__ =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass a__ =model( input_ids=input_ids.to(lowercase_) , bbox=bbox.to(lowercase_) , pixel_values=pixel_values.to(lowercase_) , ) # verify the logits a__ =torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape , lowercase_) a__ =torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]).to(lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4))
20
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =image_size __UpperCAmelCase =patch_size __UpperCAmelCase =num_channels __UpperCAmelCase =embed_dim __UpperCAmelCase =depths __UpperCAmelCase =num_heads __UpperCAmelCase =window_size __UpperCAmelCase =mlp_ratio __UpperCAmelCase =qkv_bias __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =drop_path_rate __UpperCAmelCase =hidden_act __UpperCAmelCase =use_absolute_embeddings __UpperCAmelCase =patch_norm __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =initializer_range __UpperCAmelCase =is_training __UpperCAmelCase =scope __UpperCAmelCase =use_labels __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =encoder_stride def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase =None if self.use_labels: __UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase =self.get_config() return config, pixel_values, labels def _a ( self : List[Any] ) -> Optional[Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: __UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple: __UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase =1 __UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __UpperCAmelCase =self.type_sequence_label_size __UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : List[str] ) -> Tuple: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCamelCase : Tuple = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Dict = False lowerCamelCase : Tuple = False lowerCamelCase : List[str] = False lowerCamelCase : Tuple = False def _a ( self : str ) -> str: __UpperCAmelCase =SwinvaModelTester(self ) __UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 ) def _a ( self : List[Any] ) -> Optional[int]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : str ) -> str: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def _a ( self : Tuple ) -> Tuple: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def _a ( self : Optional[Any] ) -> int: pass def _a ( self : Tuple ) -> int: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _a ( self : str ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase =[*signature.parameters.keys()] __UpperCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =True for model_class in self.all_model_classes: __UpperCAmelCase =True __UpperCAmelCase =False __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions __UpperCAmelCase =len(self.model_tester.depths ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase =True __UpperCAmelCase =config.window_size**2 __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __UpperCAmelCase =True __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase =self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase =2 self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.hidden_states __UpperCAmelCase =getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # Swinv2 has a different seq_length __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase =outputs.reshaped_hidden_states self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape __UpperCAmelCase =( reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _a ( self : str ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =3 __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Dict: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : int ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( self : Dict ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ) -> Dict: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def _a ( self : int ) -> Optional[int]: __UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) # verify the logits __UpperCAmelCase =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
68
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase_ : str = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : int = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : int = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys UpperCAmelCase_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __A = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : int = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None: __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase =kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) __UpperCAmelCase ="""None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token __UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase =unk_token if pad_token is None else pad_token __UpperCAmelCase =eos_token if bos_token is None else bos_token else: __UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token __UpperCAmelCase ="""<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =do_lower_case __UpperCAmelCase =remove_space __UpperCAmelCase =keep_accents __UpperCAmelCase =vocab_file __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace normalization in input texts # fmt : off __UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __UpperCAmelCase =re.compile( f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' ) def __getstate__( self : Any ) -> str: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Union[str, Any] ) -> int: return len(self.sp_model ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str: __UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization __UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE ) return text def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: return out_string def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str: __UpperCAmelCase =[] __UpperCAmelCase ="""""" __UpperCAmelCase =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __UpperCAmelCase =True __UpperCAmelCase =[] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Any ) -> Dict[str, int]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: __UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __UpperCAmelCase =( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=__SCREAMING_SNAKE_CASE )
68
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case : str = logging.get_logger(__name__) _snake_case : Tuple = {'vocab_file': 'sentencepiece.model'} _snake_case : List[Any] = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _snake_case : str = { 'google/rembert': 256, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[Any]="[CLS]" , lowerCAmelCase_ : Any="[SEP]" , lowerCAmelCase_ : Dict="[UNK]" , lowerCAmelCase_ : Tuple="[SEP]" , lowerCAmelCase_ : List[str]="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : Optional[Any]="[MASK]" , **lowerCAmelCase_ : List[Any] , ) -> int: """simple docstring""" super().__init__( do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) _a = do_lower_case _a = remove_space _a = keep_accents _a = vocab_file _a = spm.SentencePieceProcessor() self.sp_model.Load(lowerCAmelCase_ ) @property def __lowerCAmelCase ( self : str ) -> Any: """simple docstring""" return len(self.sp_model ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" _a = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ) -> List[Any]: """simple docstring""" _a = self.__dict__.copy() _a = None return state def __setstate__( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> int: """simple docstring""" _a = d _a = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]=False ) -> Tuple: """simple docstring""" _a = self.sp_model.EncodeAsPieces(lowerCAmelCase_ ) return pieces def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Dict ) -> Union[str, Any]: """simple docstring""" return self.sp_model.PieceToId(lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> int: """simple docstring""" return self.sp_model.IdToPiece(lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Any ) -> Union[str, Any]: """simple docstring""" _a = self.sp_model.decode_pieces(lowerCAmelCase_ ) return out_string def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1] def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase_ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase_ ) ) return _a = os.path.join( lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
22
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]: __UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )] if identifier is not None: __UpperCAmelCase =[file for file in files if identifier in file] if n_identifier is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for n_ in n_identifier: __UpperCAmelCase =[file for file in files if n_ not in file] else: __UpperCAmelCase =[file for file in files if n_identifier not in file] __UpperCAmelCase =ignore_files or [] ignore_files.append("""__init__.py""" ) __UpperCAmelCase =[file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __SCREAMING_SNAKE_CASE ) if only_modules: __UpperCAmelCase =file.split(""".""" )[0] try: __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _a ( self : Optional[Any] ) -> List[str]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""modeling""" __UpperCAmelCase =[ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""tokenization""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""configuration""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Tuple: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase =Path("""docs/source""" ) __UpperCAmelCase =["""favicon.ico"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
68
0
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _snake_case (__lowercase , __lowercase , __lowercase): UpperCamelCase_ = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value') UpperCamelCase_ = ( ('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'), ) if not os.path.isdir(__lowercase): os.makedirs(__lowercase) UpperCamelCase_ = model.state_dict() def to_tf_var_name(__lowercase): for patt, repl in iter(__lowercase): UpperCamelCase_ = name.replace(__lowercase , __lowercase) return f"""bert/{name}""" def create_tf_var(__lowercase , __lowercase , __lowercase): UpperCamelCase_ = tf.dtypes.as_dtype(tensor.dtype) UpperCamelCase_ = tf.get_variable(dtype=__lowercase , shape=tensor.shape , name=__lowercase , initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(__lowercase) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCamelCase_ = to_tf_var_name(__lowercase) UpperCamelCase_ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose): UpperCamelCase_ = torch_tensor.T UpperCamelCase_ = create_tf_var(tensor=__lowercase , name=__lowercase , session=__lowercase) tf.keras.backend.set_value(__lowercase , __lowercase) UpperCamelCase_ = session.run(__lowercase) print(f"""Successfully created {tf_name}: {np.allclose(__lowercase , __lowercase)}""") UpperCamelCase_ = tf.train.Saver(tf.trainable_variables()) saver.save(__lowercase , os.path.join(__lowercase , model_name.replace('-' , '_') + '.ckpt')) def _snake_case (__lowercase=None): UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument('--model_name' , type=__lowercase , required=__lowercase , help='model name e.g. bert-base-uncased') parser.add_argument( '--cache_dir' , type=__lowercase , default=__lowercase , required=__lowercase , help='Directory containing pytorch model') parser.add_argument('--pytorch_model_path' , type=__lowercase , required=__lowercase , help='/path/to/<pytorch-model-name>.bin') parser.add_argument('--tf_cache_dir' , type=__lowercase , required=__lowercase , help='Directory in which to save tensorflow model') UpperCamelCase_ = parser.parse_args(__lowercase) UpperCamelCase_ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name) if __name__ == "__main__": main()
23
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __A = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]: """simple docstring""" if rng is None: __UpperCAmelCase =random.Random() __UpperCAmelCase =1 for dim in shape: total_dims *= dim __UpperCAmelCase =[] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) __UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any: """simple docstring""" __UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch __UpperCAmelCase =1 return attn_mask @require_flax class _A : """simple docstring""" lowerCamelCase : Optional[Any] = None lowerCamelCase : int = () def _a ( self : str ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __UpperCAmelCase =2 __UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2 __UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length] __UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __UpperCAmelCase =input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __UpperCAmelCase =config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Union[str, Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =0 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params ) __UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences __UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : Union[str, Any] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length __UpperCAmelCase =0.8 __UpperCAmelCase =10 __UpperCAmelCase =0.3 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =2 __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : int ) -> Any: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) __UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) __UpperCAmelCase ="""Hello world""" __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ): model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ): __UpperCAmelCase ={"""foo""": """bar"""} model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
68
0
'''simple docstring''' class lowerCAmelCase : def __init__( self ) -> Any: '''simple docstring''' __snake_case = {} def lowerCAmelCase ( self ) -> None: '''simple docstring''' print(self.vertex ) for i in self.vertex: print(__SCREAMING_SNAKE_CASE , ''' -> ''' , ''' -> '''.join([str(__SCREAMING_SNAKE_CASE ) for j in self.vertex[i]] ) ) def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' if from_vertex in self.vertex: self.vertex[from_vertex].append(__SCREAMING_SNAKE_CASE ) else: # else make a new vertex __snake_case = [to_vertex] def lowerCAmelCase ( self ) -> None: '''simple docstring''' __snake_case = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' __snake_case = True print(__SCREAMING_SNAKE_CASE , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('''DFS:''') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
24
from __future__ import annotations from collections.abc import Iterator class _A : """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None: __UpperCAmelCase =value __UpperCAmelCase =None __UpperCAmelCase =None class _A : """simple docstring""" def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None: __UpperCAmelCase =tree def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : int ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
68
0
from __future__ import annotations import math class _UpperCamelCase : '''simple docstring''' def __init__( self : Dict , a : int ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = size # approximate the overall size of segment tree with given value SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # create array to store lazy update SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )] SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update def __UpperCamelCase ( self : Tuple , a : int ) -> int: """simple docstring""" return idx * 2 def __UpperCamelCase ( self : str , a : int ) -> int: """simple docstring""" return idx * 2 + 1 def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None: """simple docstring""" if left_element == right_element: SCREAMING_SNAKE_CASE : int = a[left_element - 1] else: SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2 self.build(self.left(a ) , a , a , a ) self.build(self.right(a ) , mid + 1 , a , a ) SCREAMING_SNAKE_CASE : List[Any] = max( self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] ) def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool: """simple docstring""" if self.flag[idx] is True: SCREAMING_SNAKE_CASE : Any = self.lazy[idx] SCREAMING_SNAKE_CASE : List[str] = False if left_element != right_element: SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx] SCREAMING_SNAKE_CASE : int = self.lazy[idx] SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : List[Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: SCREAMING_SNAKE_CASE : Optional[Any] = val if left_element != right_element: SCREAMING_SNAKE_CASE : str = val SCREAMING_SNAKE_CASE : str = val SCREAMING_SNAKE_CASE : Tuple = True SCREAMING_SNAKE_CASE : Optional[Any] = True return True SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2 self.update(self.left(a ) , a , a , a , a , a ) self.update(self.right(a ) , mid + 1 , a , a , a , a ) SCREAMING_SNAKE_CASE : Optional[int] = max( self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] ) return True def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float: """simple docstring""" if self.flag[idx] is True: SCREAMING_SNAKE_CASE : int = self.lazy[idx] SCREAMING_SNAKE_CASE : List[Any] = False if left_element != right_element: SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx] SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx] SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Union[str, Any] = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2 SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a ) SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a ) return max(a , a ) def __str__( self : str ) -> str: """simple docstring""" return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] a_ = 15 a_ = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
25
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def lowercase__ ( A_: Union[str, Any] ) -> List[Any]: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(F'''role {role_name} already exists. Using existing one''' ) def lowercase__ ( A_: Dict ) -> Any: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =_ask_options( """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , ) __UpperCAmelCase =None if credentials_configuration == 0: __UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" ) __UpperCAmelCase =aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __UpperCAmelCase =_ask_field("""AWS Access Key ID: """ ) __UpperCAmelCase =aws_access_key_id __UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ ) __UpperCAmelCase =aws_secret_access_key __UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" ) __UpperCAmelCase =aws_region __UpperCAmelCase =_ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , ) if role_management == 0: __UpperCAmelCase =_ask_field("""Enter your IAM role name: """ ) else: __UpperCAmelCase ="""accelerate_sagemaker_execution_role""" print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __UpperCAmelCase =_ask_field( """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_custom_docker_image: __UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() ) __UpperCAmelCase =_ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_inputs_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_metrics_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_options( """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , ) __UpperCAmelCase ={} __UpperCAmelCase =_ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_dynamo: __UpperCAmelCase ="""dynamo_""" __UpperCAmelCase =_ask_options( """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) __UpperCAmelCase =_ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_custom_options: __UpperCAmelCase =_ask_options( """Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , ) __UpperCAmelCase =_ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =_ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase ="""Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __UpperCAmelCase =_ask_options( A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" ) __UpperCAmelCase =1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __UpperCAmelCase =_ask_field( """How many machines do you want use? [1]: """ , A_ , default=1 , ) __UpperCAmelCase =_ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
68
0
'''simple docstring''' import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class _A ( unittest.TestCase ): def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" debug_launcher(test_script.main ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" debug_launcher(test_ops.main )
26
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = 'ctrl' lowerCamelCase : Any = ['past_key_values'] lowerCamelCase : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any: __UpperCAmelCase =vocab_size __UpperCAmelCase =n_positions __UpperCAmelCase =n_embd __UpperCAmelCase =n_layer __UpperCAmelCase =n_head __UpperCAmelCase =dff __UpperCAmelCase =resid_pdrop __UpperCAmelCase =embd_pdrop __UpperCAmelCase =layer_norm_epsilon __UpperCAmelCase =initializer_range __UpperCAmelCase =use_cache super().__init__(**__SCREAMING_SNAKE_CASE )
68
0
import numpy as np __A : int = [ ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], ["q", "r", "s", "t", "u"], ["v", "w", "x", "y", "z"], ] class lowerCamelCase: '''simple docstring''' def __init__( self ): _A = np.array(snake_case_ ) def lowerCAmelCase__ ( self , snake_case_ ): _A, _A = np.where(letter == self.SQUARE ) _A = np.concatenate([indexa + 1, indexa + 1] ) return indexes def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ): _A = self.SQUARE[indexa - 1, indexa - 1] return letter def lowerCAmelCase__ ( self , snake_case_ ): _A = message.lower() _A = message.replace(' ' , '' ) _A = message.replace('j' , 'i' ) _A = np.empty((2, len(snake_case_ )) ) for letter_index in range(len(snake_case_ ) ): _A = self.letter_to_numbers(message[letter_index] ) _A = numbers[0] _A = numbers[1] _A = first_step.reshape(2 * len(snake_case_ ) ) _A = '' for numbers_index in range(len(snake_case_ ) ): _A = int(second_step[numbers_index * 2] ) _A = int(second_step[(numbers_index * 2) + 1] ) _A = self.numbers_to_letter(snake_case_ , snake_case_ ) _A = encoded_message + letter return encoded_message def lowerCAmelCase__ ( self , snake_case_ ): _A = message.lower() message.replace(' ' , '' ) _A = np.empty(2 * len(snake_case_ ) ) for letter_index in range(len(snake_case_ ) ): _A = self.letter_to_numbers(message[letter_index] ) _A = numbers[0] _A = numbers[1] _A = first_step.reshape((2, len(snake_case_ )) ) _A = '' for numbers_index in range(len(snake_case_ ) ): _A = int(second_step[0, numbers_index] ) _A = int(second_step[1, numbers_index] ) _A = self.numbers_to_letter(snake_case_ , snake_case_ ) _A = decoded_message + letter return decoded_message
27
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __UpperCAmelCase =k.replace(A_ , A_ ) if k.startswith("""encoder""" ): __UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" ) __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" ) return k def lowercase__ ( A_: Tuple ) -> str: """simple docstring""" __UpperCAmelCase =[ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __UpperCAmelCase =sd.pop(A_ ) __UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __UpperCAmelCase =v __A = ["START"] @torch.no_grad() def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =torch.load(A_ , map_location="""cpu""" ) __UpperCAmelCase =model["""model"""] __UpperCAmelCase =BlenderbotConfig.from_json_file(A_ ) __UpperCAmelCase =BlenderbotForConditionalGeneration(A_ ) __UpperCAmelCase =m.model.state_dict().keys() __UpperCAmelCase =[] __UpperCAmelCase ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue __UpperCAmelCase =rename_state_dict_key(A_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __UpperCAmelCase =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(A_ ) m.model.load_state_dict(A_ , strict=A_ ) m.half() m.save_pretrained(A_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __A = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
68
0
'''simple docstring''' import copy import random from transformers import CLIPTokenizer class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, *A, **A ): '''simple docstring''' super().__init__(*A, **A ) SCREAMING_SNAKE_CASE : Tuple = {} def UpperCamelCase_ ( self, A, *A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = super().add_tokens(A, *A, **A ) if num_added_tokens == 0: raise ValueError( F"The tokenizer already contains the token {placeholder_token}. Please pass a different" ' `placeholder_token` that is not already in the tokenizer.' ) def UpperCamelCase_ ( self, A, *A, A=1, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(A, *A, **A ) output.append(A ) else: SCREAMING_SNAKE_CASE : Dict = [] for i in range(A ): SCREAMING_SNAKE_CASE : str = placeholder_token + F"_{i}" self.try_adding_tokens(A, *A, **A ) output.append(A ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F"The tokenizer already has placeholder token {token} that can get confused with" F" {placeholder_token}keep placeholder tokens independent" ) SCREAMING_SNAKE_CASE : Dict = output def UpperCamelCase_ ( self, A, A=False, A=1.0 ): '''simple docstring''' if isinstance(A, A ): SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(len(A ) ): output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=A ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: SCREAMING_SNAKE_CASE : Tuple = self.token_map[placeholder_token] SCREAMING_SNAKE_CASE : int = tokens[: 1 + int(len(A ) * prop_tokens_to_load )] if vector_shuffle: SCREAMING_SNAKE_CASE : Union[str, Any] = copy.copy(A ) random.shuffle(A ) SCREAMING_SNAKE_CASE : Dict = text.replace(A, ' '.join(A ) ) return text def __call__( self, A, *A, A=False, A=1.0, **A ): '''simple docstring''' return super().__call__( self.replace_placeholder_tokens_in_text( A, vector_shuffle=A, prop_tokens_to_load=A ), *A, **A, ) def UpperCamelCase_ ( self, A, *A, A=False, A=1.0, **A ): '''simple docstring''' return super().encode( self.replace_placeholder_tokens_in_text( A, vector_shuffle=A, prop_tokens_to_load=A ), *A, **A, )
28
from itertools import permutations def lowercase__ ( A_: tuple ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __UpperCAmelCase =[7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase__ ( A_: int = 10 ) -> int: """simple docstring""" return sum( int("""""".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
68
0
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = "" lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 for keychar, cipherchar in zip(cycle(lowerCAmelCase__ ) ,lowerCAmelCase__ ): lowerCamelCase_ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(lowerCAmelCase__ ) return decoded def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = [] for key in product(lowerCAmelCase__ ,repeat=3 ): lowerCamelCase_ = try_key(lowerCAmelCase__ ,lowerCAmelCase__ ) if encoded is not None: possibles.append(lowerCAmelCase__ ) return possibles def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ): return [possible for possible in possibles if common_word in possible.lower()] def lowercase ( lowerCAmelCase__ = "p059_cipher.txt" ): lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = Path(lowerCAmelCase__ ).parent.joinpath(lowerCAmelCase__ ).read_text(encoding='''utf-8''' ) lowerCamelCase_ = [int(lowerCAmelCase__ ) for number in data.strip().split(''',''' )] lowerCamelCase_ = filter_valid_chars(lowerCAmelCase__ ) for common_word in COMMON_WORDS: lowerCamelCase_ = filter_common_word(lowerCAmelCase__ ,lowerCAmelCase__ ) if len(lowerCAmelCase__ ) == 1: break lowerCamelCase_ = possibles[0] return sum(ord(lowerCAmelCase__ ) for char in decoded_text ) if __name__ == "__main__": print(f"{solution() = }")
29
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar("T") def lowercase__ ( A_: int ) -> int: """simple docstring""" return (position - 1) // 2 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 1 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 2 class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[str] ) -> None: __UpperCAmelCase =[] __UpperCAmelCase ={} __UpperCAmelCase =0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Dict ) -> str: return str(self.heap ) def _a ( self : Optional[int] ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __UpperCAmelCase =self.elements self.elements += 1 self._bubble_up(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __UpperCAmelCase , __UpperCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __UpperCAmelCase , __UpperCAmelCase =self.heap[0] self._bubble_down(__SCREAMING_SNAKE_CASE ) return elem def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Update the weight of the given key __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase =(elem, weight) if position > 0: __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __UpperCAmelCase =self.position_map[elem] if curr_pos == 0: return None __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_up(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements and child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) else: return None if child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: # Swap the nodes at the given positions __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase , __UpperCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) __UpperCAmelCase =nodea_pos __UpperCAmelCase =nodea_pos class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[Any] ) -> None: __UpperCAmelCase ={} __UpperCAmelCase =0 def __repr__( self : Tuple ) -> str: return str(self.connections ) def __len__( self : str ) -> int: return self.nodes def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __UpperCAmelCase ={} self.nodes += 1 def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__SCREAMING_SNAKE_CASE ) self.add_node(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =weight __UpperCAmelCase =weight def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: """simple docstring""" __UpperCAmelCase ={node: maxsize for node in graph.connections} __UpperCAmelCase ={node: None for node in graph.connections} __UpperCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(A_ , A_ ) if priority_queue.is_empty(): return dist, parent # initialization __UpperCAmelCase =priority_queue.extract_min() __UpperCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): __UpperCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node return dist, parent
68
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a = { 'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['BloomTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST', 'BloomForCausalLM', 'BloomModel', 'BloomPreTrainedModel', 'BloomForSequenceClassification', 'BloomForTokenClassification', 'BloomForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A = logging.get_logger(__name__) @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase =deprecated_arg[3:] __UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCamelCase : str = field( default=UpperCamelCase , metadata={'help': 'Name of TPU'} , ) lowerCamelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} ) lowerCamelCase : bool = field( default=UpperCamelCase , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase =None if self.tpu: try: if self.tpu_name: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase =None return tpu @cached_property def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self : str ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self : Dict ) -> Optional[int]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self : List[str] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[str] ) -> bool: return self.n_gpu > 0
68
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : Optional[int] = logging.get_logger(__name__) lowerCamelCase__ : Any = { 'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json', } class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "git_vision_model" def __init__( self : str , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : str=3_072 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Union[str, Any]=12 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Any=224 , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Optional[Any]="quick_gelu" , _lowerCAmelCase : Tuple=1E-5 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Tuple=0.02 , **_lowerCAmelCase : Any , ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = hidden_act @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : Any ): cls._set_token_in_kwargs(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the vision config dict if we are loading from GITConfig if config_dict.get('model_type' ) == "git": SCREAMING_SNAKE_CASE_ = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "git" def __init__( self : Union[str, Any] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=30_522 , _lowerCAmelCase : Union[str, Any]=768 , _lowerCAmelCase : List[str]=6 , _lowerCAmelCase : Optional[int]=12 , _lowerCAmelCase : str=3_072 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Optional[int]=1_024 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Any=1E-12 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : Any="absolute" , _lowerCAmelCase : str=True , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Optional[Any]=101 , _lowerCAmelCase : Tuple=102 , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[str] , ): super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) if vision_config is None: SCREAMING_SNAKE_CASE_ = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.' ) SCREAMING_SNAKE_CASE_ = GitVisionConfig(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = position_embedding_type SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = tie_word_embeddings SCREAMING_SNAKE_CASE_ = num_image_with_embedding SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = eos_token_id def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict() SCREAMING_SNAKE_CASE_ = self.__class__.model_type return output
31
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Dict: torch.manual_seed(0 ) __UpperCAmelCase =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _a ( self : int ) -> Union[str, Any]: __UpperCAmelCase =self.dummy_uncond_unet __UpperCAmelCase =ScoreSdeVeScheduler() __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[ 0 ] __UpperCAmelCase =image[0, -3:, -3:, -1] __UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ) -> int: __UpperCAmelCase ="""google/ncsnpp-church-256""" __UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
68
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __UpperCamelCase ( A__ , A__ ): __A : Union[str, Any] = """swin""" __A : Tuple = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , _UpperCamelCase=224 , _UpperCamelCase=4 , _UpperCamelCase=3 , _UpperCamelCase=96 , _UpperCamelCase=[2, 2, 6, 2] , _UpperCamelCase=[3, 6, 12, 24] , _UpperCamelCase=7 , _UpperCamelCase=4.0 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase="gelu" , _UpperCamelCase=False , _UpperCamelCase=0.02 , _UpperCamelCase=1e-5 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase , ): super().__init__(**_UpperCamelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(_UpperCamelCase ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** (len(_UpperCamelCase ) - 1) ) _UpperCAmelCase = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(_UpperCamelCase ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names ) class __UpperCamelCase ( A__ ): __A : Dict = version.parse("""1.11""" ) @property def UpperCamelCase( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCamelCase( self ): return 1e-4
32
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase =self.model.config else: __UpperCAmelCase =config __UpperCAmelCase =data_args __UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase =label_smoothed_nll_loss def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any: if self.optimizer is None: __UpperCAmelCase =["""bias""", """LayerNorm.weight"""] __UpperCAmelCase =[ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase =Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase =Adafactor __UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase =AdamW __UpperCAmelCase ={ """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase =self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase =OSS( params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: __UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase =schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase =schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) return scheduler def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 ) __UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: __UpperCAmelCase =inputs.pop("""labels""" ) __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return loss def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: __UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase =self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) __UpperCAmelCase =inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: # If PAD token is not defined at least EOS token has to be defined __UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase =pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase =tensor return padded_tensor
68
0
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case__ = mf_knapsack(i - 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: snake_case__ = max( mf_knapsack(i - 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , mf_knapsack(i - 1 , __lowerCAmelCase , __lowerCAmelCase , j - wt[i - 1] ) + val[i - 1] , ) snake_case__ = val return f[i][j] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: snake_case__ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case__ = dp[i - 1][w_] return dp[n][w_], dp def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: if not (isinstance(__lowerCAmelCase , (list, tuple) ) and isinstance(__lowerCAmelCase , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) snake_case__ = len(__lowerCAmelCase ) if num_items != len(__lowerCAmelCase ): snake_case__ = ( '''The number of weights must be the same as the number of values.\n''' F"""But got {num_items} weights and {len(__lowerCAmelCase )} values""" ) raise ValueError(__lowerCAmelCase ) for i in range(__lowerCAmelCase ): if not isinstance(wt[i] , __lowerCAmelCase ): snake_case__ = ( '''All weights must be integers but got weight of ''' F"""type {type(wt[i] )} at index {i}""" ) raise TypeError(__lowerCAmelCase ) snake_case__ , snake_case__ = knapsack(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) snake_case__ = set() _construct_solution(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return optimal_val, example_optional_set def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(__lowerCAmelCase , __lowerCAmelCase , i - 1 , __lowerCAmelCase , __lowerCAmelCase ) else: optimal_set.add(__lowerCAmelCase ) _construct_solution(__lowerCAmelCase , __lowerCAmelCase , i - 1 , j - wt[i - 1] , __lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : Tuple = [3, 2, 4, 4] lowerCamelCase__ : List[Any] = [4, 3, 2, 3] lowerCamelCase__ : Optional[Any] = 4 lowerCamelCase__ : Tuple = 6 lowerCamelCase__ : str = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowerCamelCase__ , lowerCamelCase__ : str = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
33
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any: __UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Optional[Any] ) -> int: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Optional[Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # warmup pass to apply optimizations __UpperCAmelCase =pipe(**self.get_dummy_inputs() ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Dict ) -> int: __UpperCAmelCase =ort.SessionOptions() __UpperCAmelCase =False return options def _a ( self : Dict ) -> Any: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _a ( self : List[str] ) -> str: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) __UpperCAmelCase =LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
68
0
"""simple docstring""" import numpy as np def __snake_case ( _lowercase ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def __snake_case ( _lowercase ): """simple docstring""" return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
34
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'sequence-classification' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: if type(__SCREAMING_SNAKE_CASE ) == dict: __UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =glue_output_modes[hparams.task] __UpperCAmelCase =glue_tasks_num_labels[hparams.task] super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode ) def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: return self.model(**__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs[0] __UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""] __UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _a ( self : Tuple ) -> List[Any]: __UpperCAmelCase =self.hparams __UpperCAmelCase =processors[args.task]() __UpperCAmelCase =processor.get_labels() for mode in ["train", "dev"]: __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __UpperCAmelCase =( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) __UpperCAmelCase =convert_examples_to_features( __SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader: __UpperCAmelCase ="""dev""" if mode == """test""" else mode __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =outputs[:2] __UpperCAmelCase =logits.detach().cpu().numpy() __UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple: __UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() __UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} __UpperCAmelCase =dict(results.items() ) __UpperCAmelCase =results return ret, preds_list, out_label_list def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) parser.add_argument( """--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def lowercase__ ( ) -> str: """simple docstring""" __UpperCAmelCase =argparse.ArgumentParser() add_generic_args(A_ , os.getcwd() ) __UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() ) __UpperCAmelCase =parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __UpperCAmelCase =os.path.join( """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __UpperCAmelCase =GLUETransformer(A_ ) __UpperCAmelCase =generic_train(A_ , A_ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) ) __UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(A_ ) if __name__ == "__main__": main()
68
0
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict=7 , _lowercase : List[Any]=3 , _lowercase : str=18 , _lowercase : Optional[int]=30 , _lowercase : List[Any]=4_00 , _lowercase : Any=True , _lowercase : Optional[int]=None , _lowercase : int=True , _lowercase : List[str]=None , _lowercase : int=True , _lowercase : List[str]=[0.5, 0.5, 0.5] , _lowercase : Dict=[0.5, 0.5, 0.5] , _lowercase : Union[str, Any]=False , ): SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else {'''height''': 20, '''width''': 20} SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Dict = batch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size SCREAMING_SNAKE_CASE__ : List[str] = min_resolution SCREAMING_SNAKE_CASE__ : Dict = max_resolution SCREAMING_SNAKE_CASE__ : int = do_resize SCREAMING_SNAKE_CASE__ : List[Any] = size SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop SCREAMING_SNAKE_CASE__ : Tuple = crop_size SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize SCREAMING_SNAKE_CASE__ : str = image_mean SCREAMING_SNAKE_CASE__ : int = image_std SCREAMING_SNAKE_CASE__ : Optional[int] = do_reduce_labels def lowercase__ ( self : Tuple ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def a ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : str = Image.open(dataset[0]['''file'''] ) SCREAMING_SNAKE_CASE__ : Dict = Image.open(dataset[1]['''file'''] ) return image, map def a ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : Any = Image.open(ds[0]['''file'''] ) SCREAMING_SNAKE_CASE__ : Dict = Image.open(ds[1]['''file'''] ) SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[2]['''file'''] ) SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class lowercase ( _UpperCAmelCase , unittest.TestCase ): lowerCamelCase : Tuple = BeitImageProcessor if is_vision_available() else None def lowercase__ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ : Tuple = BeitImageProcessingTester(self ) @property def lowercase__ ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self : Tuple ): SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowercase , '''size''' ) ) self.assertTrue(hasattr(_lowercase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowercase , '''center_crop''' ) ) self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowercase , '''image_std''' ) ) def lowercase__ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , _lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowercase ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , _lowercase ) def lowercase__ ( self : str ): pass def lowercase__ ( self : int ): # Initialize image_processing SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase__ ( self : Dict ): # Initialize image_processing SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase__ ( self : Optional[Any] ): # Initialize image_processing SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase__ ( self : Optional[int] ): # Initialize image_processing SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase ) SCREAMING_SNAKE_CASE__ : Any = [] for image in image_inputs: self.assertIsInstance(_lowercase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched SCREAMING_SNAKE_CASE__ : str = image_processing(_lowercase , _lowercase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , _lowercase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = prepare_semantic_batch_inputs() SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , _lowercase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def lowercase__ ( self : int ): # Initialize image_processing SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = prepare_semantic_single_inputs() SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) SCREAMING_SNAKE_CASE__ : List[Any] = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
35
def lowercase__ ( A_: int , A_: int ) -> int: """simple docstring""" return 1 if input_a == input_a else 0 def lowercase__ ( ) -> None: """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
68
0
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable __lowercase : Union[str, Any] = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = ['''DPTFeatureExtractor'''] __lowercase : Dict = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys __lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
36
from __future__ import annotations import bisect def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] < item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =0 __UpperCAmelCase =len(A_ ) - 1 while left <= right: __UpperCAmelCase =left + (right - left) // 2 __UpperCAmelCase =sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __UpperCAmelCase =midpoint - 1 else: __UpperCAmelCase =midpoint + 1 return None def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =bisect.bisect_left(A_ , A_ ) if index != len(A_ ) and sorted_collection[index] == item: return index return None def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None: """simple docstring""" if right < left: return None __UpperCAmelCase =left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 ) else: return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ ) if __name__ == "__main__": __A = input("Enter numbers separated by comma:\n").strip() __A = sorted(int(item) for item in user_input.split(",")) __A = int(input("Enter a single number to be found in the list:\n")) __A = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
68
0
from ..utils import DummyObject, requires_backends class A__ ( metaclass=A__ ): """simple docstring""" _lowercase = ['transformers', 'torch', 'note_seq'] def __init__( self : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): requires_backends(self , ["transformers", "torch", "note_seq"] ) @classmethod def _UpperCamelCase( cls : str , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): requires_backends(cls , ["transformers", "torch", "note_seq"] ) @classmethod def _UpperCamelCase( cls : str , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ): requires_backends(cls , ["transformers", "torch", "note_seq"] )
37
from typing import List from .keymap import KEYMAP, get_character def lowercase__ ( A_: str ) -> str: """simple docstring""" def decorator(A_: int ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += [key] setattr(A_ , """handle_key""" , A_ ) return func return decorator def lowercase__ ( *A_: List[str] ) -> Optional[int]: """simple docstring""" def decorator(A_: Tuple ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += keys setattr(A_ , """handle_key""" , A_ ) return func return decorator class _A ( UpperCamelCase ): """simple docstring""" def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int: __UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ): setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} ) setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] ) for key in handled_keys: __UpperCAmelCase =value return new_cls @staticmethod def _a ( cls : Dict ) -> List[Any]: __UpperCAmelCase =get_character() if char != KEYMAP["undefined"]: __UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE ) if handler: __UpperCAmelCase =char return handler(cls ) else: return None def lowercase__ ( cls: str ) -> int: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
68
0
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): super().__init__( features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) snake_case__ : Tuple = Generator( cache_dir=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , gen_kwargs=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) def __UpperCamelCase ( self ): # Build iterable dataset if self.streaming: snake_case__ : Dict = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: snake_case__ : Optional[Any] = None snake_case__ : List[Any] = None snake_case__ : Dict = None snake_case__ : int = None self.builder.download_and_prepare( download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , ) snake_case__ : Optional[int] = self.builder.as_dataset( split="""train""" , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory ) return dataset
38
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
68
0
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case_ : '''simple docstring''' def __init__( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str=2 , _UpperCamelCase : Dict=8 , _UpperCamelCase : str=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=9_9 , _UpperCamelCase : Any=1_6 , _UpperCamelCase : List[str]=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : str=3_6 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : str=0.0 , _UpperCamelCase : Any=5_1_2 , _UpperCamelCase : int=1_6 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : Tuple=0.02 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=4 , _UpperCamelCase : Dict=None , ) ->Optional[Any]: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def snake_case__( self : List[Any] ) ->List[Any]: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__( self : Optional[int] ) ->Optional[Any]: return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , ) def snake_case__( self : List[Any] ) ->str: snake_case_ = self.get_config() snake_case_ = 3_0_0 return config def snake_case__( self : List[str] ) ->Optional[Any]: ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) = self.prepare_config_and_inputs() snake_case_ = True snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case__( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Dict ) ->Dict: snake_case_ = MraModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) snake_case_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase ) snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , ) ->Optional[Any]: snake_case_ = True snake_case_ = MraModel(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model( _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , ) snake_case_ = model( _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , ) snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ) ->str: snake_case_ = MraForMaskedLM(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__( self : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->Any: snake_case_ = MraForQuestionAnswering(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model( _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[str] ) ->Optional[Any]: snake_case_ = self.num_labels snake_case_ = MraForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) ->int: snake_case_ = self.num_labels snake_case_ = MraForTokenClassification(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) ->Any: snake_case_ = self.num_choices snake_case_ = MraForMultipleChoice(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = model( _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__( self : str ) ->List[str]: snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) = config_and_inputs snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : List[str] = () def snake_case__( self : List[str] ) ->Optional[int]: snake_case_ = MraModelTester(self ) snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def snake_case__( self : Dict ) ->Optional[int]: self.config_tester.run_common_tests() def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def snake_case__( self : int ) ->Optional[int]: snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*_UpperCamelCase ) def snake_case__( self : Union[str, Any] ) ->List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase ) def snake_case__( self : List[str] ) ->Tuple: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase ) def snake_case__( self : Any ) ->Optional[int]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase ) def snake_case__( self : str ) ->List[str]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase ) def snake_case__( self : int ) ->Any: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase ) @slow def snake_case__( self : int ) ->str: for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = MraModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def snake_case__( self : Union[str, Any] ) ->Tuple: return @require_torch class snake_case_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case__( self : List[Any] ) ->Optional[Any]: snake_case_ = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) snake_case_ = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(_UpperCamelCase )[0] snake_case_ = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , _UpperCamelCase ) snake_case_ = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) ) @slow def snake_case__( self : List[str] ) ->int: snake_case_ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) snake_case_ = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(_UpperCamelCase )[0] snake_case_ = 5_0_2_6_5 snake_case_ = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , _UpperCamelCase ) snake_case_ = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) ) @slow def snake_case__( self : Union[str, Any] ) ->Any: snake_case_ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) snake_case_ = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(_UpperCamelCase )[0] snake_case_ = 5_0_2_6_5 snake_case_ = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , _UpperCamelCase ) snake_case_ = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
39
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =seq_length __UpperCAmelCase =is_training __UpperCAmelCase =use_attention_mask __UpperCAmelCase =use_token_type_ids __UpperCAmelCase =use_labels __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =initializer_range __UpperCAmelCase =num_choices def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase =None if self.use_attention_mask: __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase =None if self.use_token_type_ids: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self : List[str] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase =True __UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =FlaxRobertaModelTester(self ) @slow def _a ( self : Optional[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
68
0
import math import random def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __UpperCAmelCase = 0.02 def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float: UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(snake_case__ ): # Forward propagation UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? UpperCamelCase : int = (expected / 100) - layer_a # Error delta UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = int(input('''Expected value: ''')) __UpperCAmelCase = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
40
from __future__ import annotations def lowercase__ ( A_: list[list[int]] ) -> int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(A_ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(A_ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
68
0
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase__ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING lowerCAmelCase__ = { # used to compute the property `self.chunk_length` '''EncodecConfig''': ['''overlap'''], # used as `self.bert_model = BertModel(config, ...)` '''DPRConfig''': True, # not used in modeling files, but it's an important information '''FSMTConfig''': ['''langs'''], # used internally in the configuration class file '''GPTNeoConfig''': ['''attention_types'''], # used internally in the configuration class file '''EsmConfig''': ['''is_folding_model'''], # used during training (despite we don't have training script for these models yet) '''Mask2FormerConfig''': ['''ignore_value'''], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) '''OneFormerConfig''': ['''ignore_value''', '''norm'''], # used during preprocessing and collation, see `collating_graphormer.py` '''GraphormerConfig''': ['''spatial_pos_max'''], # used internally in the configuration class file '''T5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally '''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], '''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], # used internally in the configuration class file '''LongT5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file '''SwitchTransformersConfig''': ['''feed_forward_proj'''], # having default values other than `1e-5` - we can't fix them without breaking '''BioGptConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''GLPNConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''SegformerConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''CvtConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''PerceiverConfig''': ['''layer_norm_eps'''], # used internally to calculate the feature size '''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate `mlp_dim` '''SamVisionConfig''': ['''mlp_ratio'''], # For (head) training, but so far not implemented '''ClapAudioConfig''': ['''num_classes'''], # Not used, but providing useful information to users '''SpeechT5HifiGanConfig''': ['''sampling_rate'''], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { '''CLIPSegConfig''': True, '''DeformableDetrConfig''': True, '''DetaConfig''': True, '''DinatConfig''': True, '''DonutSwinConfig''': True, '''EfficientFormerConfig''': True, '''FSMTConfig''': True, '''JukeboxConfig''': True, '''LayoutLMv2Config''': True, '''MaskFormerSwinConfig''': True, '''MT5Config''': True, '''NatConfig''': True, '''OneFormerConfig''': True, '''PerceiverConfig''': True, '''RagConfig''': True, '''SpeechT5Config''': True, '''SwinConfig''': True, '''Swin2SRConfig''': True, '''Swinv2Config''': True, '''SwitchTransformersConfig''': True, '''TableTransformerConfig''': True, '''TapasConfig''': True, '''TransfoXLConfig''': True, '''UniSpeechConfig''': True, '''UniSpeechSatConfig''': True, '''WavLMConfig''': True, '''WhisperConfig''': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) '''JukeboxPriorConfig''': True, # TODO: @Younes (for `is_decoder`) '''Pix2StructTextConfig''': True, } ) def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"config.{attribute}" in modeling_source or F"getattr(config, \"{attribute}\"" in modeling_source or F"getattr(self.config, \"{attribute}\"" in modeling_source ): __lowercase = True # Deal with multi-line cases elif ( re.search( RF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , A__ , ) is not None ): __lowercase = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: __lowercase = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files __lowercase = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] __lowercase = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed __lowercase = True if not attribute_used: __lowercase = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: __lowercase = True elif attribute in ["tie_word_embeddings"] and default_value is False: __lowercase = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: __lowercase = True elif attribute.endswith('''_token_id''' ): __lowercase = True # configuration class specific cases if not case_allowed: __lowercase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) __lowercase = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def _A ( A__ ): """simple docstring""" __lowercase = dict(inspect.signature(config_class.__init__ ).parameters ) __lowercase = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] __lowercase = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass __lowercase = {} if len(config_class.attribute_map ) > 0: __lowercase = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files __lowercase = inspect.getsourcefile(A__ ) __lowercase = os.path.dirname(A__ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. __lowercase = [os.path.join(A__ , A__ ) for fn in os.listdir(A__ ) if fn.startswith('''modeling_''' )] # Get the source code strings __lowercase = [] for path in modeling_paths: if os.path.isfile(A__ ): with open(A__ ) as fp: modeling_sources.append(fp.read() ) __lowercase = [] for config_param, default_value in zip(A__ , A__ ): # `attributes` here is all the variant names for `config_param` __lowercase = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(A__ , A__ , A__ , A__ ): unused_attributes.append(attributes[0] ) return sorted(A__ ) def _A ( ): """simple docstring""" __lowercase = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) __lowercase = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda A__ : inspect.isclass(A__ ) and issubclass(A__ , A__ ) and inspect.getmodule(A__ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: __lowercase = check_config_attributes_being_used(A__ ) if len(A__ ) > 0: __lowercase = unused_attributes if len(A__ ) > 0: __lowercase = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"{name}: {attributes}\n" raise ValueError(A__ ) if __name__ == "__main__": check_config_attributes()
41
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: __UpperCAmelCase =ksize + 1 __UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(A_ ): for x in range(A_ ): # distance from center __UpperCAmelCase =x - ksize // 2 __UpperCAmelCase =y - ksize // 2 # degree to radiant __UpperCAmelCase =theta / 180 * np.pi __UpperCAmelCase =np.cos(_theta ) __UpperCAmelCase =np.sin(_theta ) # get kernel x __UpperCAmelCase =cos_theta * px + sin_theta * py # get kernel y __UpperCAmelCase =-sin_theta * px + cos_theta * py # fill kernel __UpperCAmelCase =np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image __A = imread("../image_data/lena.jpg") # turn image in gray scale value __A = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges __A = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: __A = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) __A = out / out.max() * 2_55 __A = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
68
0
'''simple docstring''' import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput A_ = "scheduler_config.json" class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 4 SCREAMING_SNAKE_CASE_ = 5 @dataclass class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 42 class UpperCAmelCase : '''simple docstring''' SCREAMING_SNAKE_CASE_ = SCHEDULER_CONFIG_NAME SCREAMING_SNAKE_CASE_ = ['dtype'] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = True @classmethod def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ ,lowerCamelCase_ = cls.load_config( pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ ,lowerCamelCase_ = cls.from_config(SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if hasattr(SCREAMING_SNAKE_CASE_ , 'create_state' ) and getattr(SCREAMING_SNAKE_CASE_ , 'has_state' , SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' self.save_config(save_directory=SCREAMING_SNAKE_CASE_ , push_to_hub=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def UpperCamelCase( self ) -> Tuple: '''simple docstring''' return self._get_compatibles() @classmethod def UpperCamelCase( cls ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = list(set([cls.__name__] + cls._compatibles ) ) lowerCamelCase_ = importlib.import_module(__name__.split('.' )[0] ) lowerCamelCase_ = [ getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] return compatible_classes def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> jnp.ndarray: assert len(__UpperCamelCase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) ,__UpperCamelCase ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=0.999 ,__UpperCamelCase=jnp.floataa ) -> jnp.ndarray: def alpha_bar(__UpperCamelCase ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 lowerCamelCase_ = [] for i in range(__UpperCamelCase ): lowerCamelCase_ = i / num_diffusion_timesteps lowerCamelCase_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) ,__UpperCamelCase ) ) return jnp.array(__UpperCamelCase ,dtype=__UpperCamelCase ) @flax.struct.dataclass class UpperCAmelCase : '''simple docstring''' SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 @classmethod def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ ) -> Dict: '''simple docstring''' lowerCamelCase_ = scheduler.config if config.trained_betas is not None: lowerCamelCase_ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": lowerCamelCase_ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowerCamelCase_ = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowerCamelCase_ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) lowerCamelCase_ = 1.0 - betas lowerCamelCase_ = jnp.cumprod(SCREAMING_SNAKE_CASE_ , axis=0 ) return cls( alphas=SCREAMING_SNAKE_CASE_ , betas=SCREAMING_SNAKE_CASE_ , alphas_cumprod=SCREAMING_SNAKE_CASE_ , ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: lowerCamelCase_ = state.alphas_cumprod lowerCamelCase_ = alphas_cumprod[timesteps] ** 0.5 lowerCamelCase_ = sqrt_alpha_prod.flatten() lowerCamelCase_ = broadcast_to_shape_from_left(__UpperCamelCase ,original_samples.shape ) lowerCamelCase_ = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCamelCase_ = sqrt_one_minus_alpha_prod.flatten() lowerCamelCase_ = broadcast_to_shape_from_left(__UpperCamelCase ,original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: lowerCamelCase_ ,lowerCamelCase_ = get_sqrt_alpha_prod(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCamelCase_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: lowerCamelCase_ ,lowerCamelCase_ = get_sqrt_alpha_prod(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCamelCase_ = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
42
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =image_size __UpperCAmelCase =patch_size __UpperCAmelCase =num_channels __UpperCAmelCase =embed_dim __UpperCAmelCase =depths __UpperCAmelCase =num_heads __UpperCAmelCase =window_size __UpperCAmelCase =mlp_ratio __UpperCAmelCase =qkv_bias __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =drop_path_rate __UpperCAmelCase =hidden_act __UpperCAmelCase =use_absolute_embeddings __UpperCAmelCase =patch_norm __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =initializer_range __UpperCAmelCase =is_training __UpperCAmelCase =scope __UpperCAmelCase =use_labels __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =encoder_stride def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase =None if self.use_labels: __UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase =self.get_config() return config, pixel_values, labels def _a ( self : List[Any] ) -> Optional[Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: __UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple: __UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase =1 __UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __UpperCAmelCase =self.type_sequence_label_size __UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : List[str] ) -> Tuple: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowerCamelCase : Tuple = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Dict = False lowerCamelCase : Tuple = False lowerCamelCase : List[str] = False lowerCamelCase : Tuple = False def _a ( self : str ) -> str: __UpperCAmelCase =SwinvaModelTester(self ) __UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 ) def _a ( self : List[Any] ) -> Optional[int]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : str ) -> str: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def _a ( self : Tuple ) -> Tuple: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def _a ( self : Optional[Any] ) -> int: pass def _a ( self : Tuple ) -> int: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _a ( self : str ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase =[*signature.parameters.keys()] __UpperCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =True for model_class in self.all_model_classes: __UpperCAmelCase =True __UpperCAmelCase =False __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions __UpperCAmelCase =len(self.model_tester.depths ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase =True __UpperCAmelCase =config.window_size**2 __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __UpperCAmelCase =True __UpperCAmelCase =True __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): __UpperCAmelCase =self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __UpperCAmelCase =2 self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =outputs.hidden_states __UpperCAmelCase =getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # Swinv2 has a different seq_length __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __UpperCAmelCase =outputs.reshaped_hidden_states self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape __UpperCAmelCase =( reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _a ( self : str ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =3 __UpperCAmelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase =True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) def _a ( self : Optional[int] ) -> Tuple: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Dict: __UpperCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : int ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( self : Dict ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ) -> Dict: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def _a ( self : int ) -> Optional[int]: __UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.default_image_processor __UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) # verify the logits __UpperCAmelCase =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
68
0
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if string == "True": return True elif string == "False": return False else: raise ValueError(f'could not parse string as bool {string}' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) lowerCAmelCase = parser.parse_args() lowerCAmelCase = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
43
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __A = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : int = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None: __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase =kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) __UpperCAmelCase ="""None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token __UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase =unk_token if pad_token is None else pad_token __UpperCAmelCase =eos_token if bos_token is None else bos_token else: __UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token __UpperCAmelCase ="""<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =do_lower_case __UpperCAmelCase =remove_space __UpperCAmelCase =keep_accents __UpperCAmelCase =vocab_file __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace normalization in input texts # fmt : off __UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __UpperCAmelCase =re.compile( f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' ) def __getstate__( self : Any ) -> str: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Union[str, Any] ) -> int: return len(self.sp_model ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str: __UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization __UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE ) return text def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: return out_string def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str: __UpperCAmelCase =[] __UpperCAmelCase ="""""" __UpperCAmelCase =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __UpperCAmelCase =True __UpperCAmelCase =[] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Any ) -> Dict[str, int]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: __UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __UpperCAmelCase =( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=__SCREAMING_SNAKE_CASE )
68
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Optional[int] = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class UpperCAmelCase__ ( A ): lowerCAmelCase_ = 'facebook/nllb-200-distilled-600M' lowerCAmelCase_ = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) lowerCAmelCase_ = 'translator' lowerCAmelCase_ = AutoTokenizer lowerCAmelCase_ = AutoModelForSeqaSeqLM lowerCAmelCase_ = LANGUAGE_CODES lowerCAmelCase_ = ['text', 'text', 'text'] lowerCAmelCase_ = ['text'] def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Optional[Any],__A : int ): if src_lang not in self.lang_to_code: raise ValueError(f'{src_lang} is not a supported language.' ) if tgt_lang not in self.lang_to_code: raise ValueError(f'{tgt_lang} is not a supported language.' ) _lowerCamelCase : List[str] = self.lang_to_code[src_lang] _lowerCamelCase : str = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __A,return_tensors="pt",src_lang=__A,tgt_lang=__A ) def lowerCamelCase_ ( self : Dict,__A : str ): return self.model.generate(**__A ) def lowerCamelCase_ ( self : int,__A : Optional[Any] ): return self.post_processor.decode(outputs[0].tolist(),skip_special_tokens=__A )
44
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]: __UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )] if identifier is not None: __UpperCAmelCase =[file for file in files if identifier in file] if n_identifier is not None: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): for n_ in n_identifier: __UpperCAmelCase =[file for file in files if n_ not in file] else: __UpperCAmelCase =[file for file in files if n_identifier not in file] __UpperCAmelCase =ignore_files or [] ignore_files.append("""__init__.py""" ) __UpperCAmelCase =[file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __SCREAMING_SNAKE_CASE ) if only_modules: __UpperCAmelCase =file.split(""".""" )[0] try: __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _a ( self : Optional[Any] ) -> List[str]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""modeling""" __UpperCAmelCase =[ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""tokenization""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase ="""configuration""" self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Tuple: __UpperCAmelCase =Path("""src/transformers""" ) __UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase =Path("""docs/source""" ) __UpperCAmelCase =["""favicon.ico"""] self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
68
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCAmelCase_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Union[str, Any] , ): UpperCamelCase__ :Tuple = parent UpperCamelCase__ :str = 13 UpperCamelCase__ :Union[str, Any] = 7 UpperCamelCase__ :List[Any] = 30 UpperCamelCase__ :int = self.seq_length + self.mem_len UpperCamelCase__ :Tuple = 15 UpperCamelCase__ :int = True UpperCamelCase__ :int = True UpperCamelCase__ :Union[str, Any] = 99 UpperCamelCase__ :Any = [10, 50, 80] UpperCamelCase__ :List[str] = 32 UpperCamelCase__ :Optional[Any] = 32 UpperCamelCase__ :int = 4 UpperCamelCase__ :Optional[int] = 8 UpperCamelCase__ :Tuple = 1_28 UpperCamelCase__ :List[Any] = 2 UpperCamelCase__ :Optional[int] = 2 UpperCamelCase__ :Dict = None UpperCamelCase__ :List[Any] = 1 UpperCamelCase__ :Any = 0 UpperCamelCase__ :List[str] = 3 UpperCamelCase__ :Any = self.vocab_size - 1 UpperCamelCase__ :Optional[int] = 0.01 def __a ( self :Optional[Any] ): UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Any = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :int = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def __a ( self :Optional[int] ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = TFTransfoXLModel(lowerCamelCase__ ) UpperCamelCase__ , UpperCamelCase__ :List[str] = model(lowerCamelCase__ ).to_tuple() UpperCamelCase__ :int = {"""input_ids""": input_ids_a, """mems""": mems_a} UpperCamelCase__ , UpperCamelCase__ :str = model(lowerCamelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __a ( self :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict ): UpperCamelCase__ :Union[str, Any] = TFTransfoXLLMHeadModel(lowerCamelCase__ ) UpperCamelCase__ , UpperCamelCase__ :str = model(lowerCamelCase__ ).to_tuple() UpperCamelCase__ :Tuple = {"""input_ids""": input_ids_a, """labels""": lm_labels} UpperCamelCase__ , UpperCamelCase__ :Any = model(lowerCamelCase__ ).to_tuple() UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model([input_ids_a, mems_a] ).to_tuple() UpperCamelCase__ :str = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __a ( self :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] ): UpperCamelCase__ :Union[str, Any] = TFTransfoXLForSequenceClassification(lowerCamelCase__ ) UpperCamelCase__ :Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self :List[Any] ): UpperCamelCase__ :Dict = self.prepare_config_and_inputs() ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = config_and_inputs UpperCamelCase__ :List[Any] = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _snake_case : str = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _snake_case : List[str] = () if is_tf_available() else () _snake_case : List[str] = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _snake_case : Optional[Any] = False _snake_case : Any = False _snake_case : Tuple = False _snake_case : List[Any] = False def __a ( self :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :str ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def __a ( self :List[str] ): UpperCamelCase__ :Any = TFTransfoXLModelTester(self ) UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , d_embed=37 ) def __a ( self :str ): self.config_tester.run_common_tests() def __a ( self :List[str] ): self.model_tester.set_seed() UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ ) def __a ( self :str ): self.model_tester.set_seed() UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ ) def __a ( self :List[Any] ): UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ ) def __a ( self :Dict ): UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ :int = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: UpperCamelCase__ :Dict = model_class(lowerCamelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: UpperCamelCase__ :Optional[Any] = model.get_output_embeddings() assert isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) UpperCamelCase__ :List[Any] = model.get_bias() assert name is None else: UpperCamelCase__ :Union[str, Any] = model.get_output_embeddings() assert x is None UpperCamelCase__ :int = model.get_bias() assert name is None def __a ( self :int ): # TODO JP: Make TransfoXL XLA compliant pass @slow def __a ( self :int ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :str = TFTransfoXLModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def __a ( self :Union[str, Any] ): pass @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def __a ( self :str ): UpperCamelCase__ :int = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off UpperCamelCase__ :Tuple = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> UpperCamelCase__ :str = model.generate(lowerCamelCase__ , max_length=2_00 , do_sample=lowerCamelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
45
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __A = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]: """simple docstring""" if rng is None: __UpperCAmelCase =random.Random() __UpperCAmelCase =1 for dim in shape: total_dims *= dim __UpperCAmelCase =[] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) __UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any: """simple docstring""" __UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch __UpperCAmelCase =1 return attn_mask @require_flax class _A : """simple docstring""" lowerCamelCase : Optional[Any] = None lowerCamelCase : int = () def _a ( self : str ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __UpperCAmelCase =2 __UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2 __UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length] __UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __UpperCAmelCase =input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __UpperCAmelCase =config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Union[str, Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =0 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval() __UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params ) __UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences __UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Any ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =False __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =2 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : Union[str, Any] ) -> List[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =True __UpperCAmelCase =max_length __UpperCAmelCase =0.8 __UpperCAmelCase =10 __UpperCAmelCase =0.3 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[int] ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() __UpperCAmelCase =max_length __UpperCAmelCase =2 __UpperCAmelCase =1 __UpperCAmelCase =8 __UpperCAmelCase =9 for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =False __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =True __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Dict ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config() # pad attention mask on the left __UpperCAmelCase =attention_mask.at[(0, 0)].set(0 ) __UpperCAmelCase =2 __UpperCAmelCase =max_length for model_class in self.all_generative_model_classes: __UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =jit(model.generate ) __UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : int ) -> Any: __UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) __UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) __UpperCAmelCase ="""Hello world""" __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ): model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ): __UpperCAmelCase ={"""foo""": """bar"""} model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
68
0
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _lowerCAmelCase : str = '''src/diffusers''' _lowerCAmelCase : List[Any] = '''.''' # This is to make sure the diffusers module imported is the one in the repo. _lowerCAmelCase : Union[str, Any] = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) _lowerCAmelCase : Union[str, Any] = spec.loader.load_module() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , _lowerCamelCase ) is not None def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Any = object_name.split("." ) _lowerCamelCase : Tuple = 0 # First let's find the module where our object lives. _lowerCamelCase : Dict = parts[i] while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase , F"""{module}.py""" ) ): i += 1 if i < len(_lowerCamelCase ): _lowerCamelCase : Dict = os.path.join(_lowerCamelCase , parts[i] ) if i >= len(_lowerCamelCase ): raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(_lowerCamelCase , F"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f: _lowerCamelCase : Any = f.readlines() # Now let's find the class / func in the code! _lowerCamelCase : Tuple = "" _lowerCamelCase : List[str] = 0 for name in parts[i + 1 :]: while ( line_index < len(_lowerCamelCase ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_lowerCamelCase ): raise ValueError(F""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). _lowerCamelCase : Optional[Any] = line_index while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] , _lowerCamelCase ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _lowerCamelCase : int = lines[start_index:line_index] return "".join(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') _lowerCAmelCase : str = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''') _lowerCAmelCase : List[Any] = re.compile(R'''<FILL\s+[^>]*>''') def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Tuple = code.split("\n" ) _lowerCamelCase : Optional[Any] = 0 while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_lowerCamelCase ): return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0] return "" def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = len(get_indent(_lowerCamelCase ) ) > 0 if has_indent: _lowerCamelCase : Any = F"""class Bla:\n{code}""" _lowerCamelCase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_lowerCamelCase ) _lowerCamelCase : List[str] = black.format_str(_lowerCamelCase , mode=_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = style_docstrings_in_code(_lowerCamelCase ) return result[len("class Bla:\n" ) :] if has_indent else result def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: _lowerCamelCase : Union[str, Any] = f.readlines() _lowerCamelCase : Tuple = [] _lowerCamelCase : List[str] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(_lowerCamelCase ): _lowerCamelCase : Tuple = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = search.groups() _lowerCamelCase : Dict = find_code_in_diffusers(_lowerCamelCase ) _lowerCamelCase : Optional[int] = get_indent(_lowerCamelCase ) _lowerCamelCase : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 _lowerCamelCase : Any = theoretical_indent _lowerCamelCase : Optional[Any] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. _lowerCamelCase : Any = True while line_index < len(_lowerCamelCase ) and should_continue: line_index += 1 if line_index >= len(_lowerCamelCase ): break _lowerCamelCase : int = lines[line_index] _lowerCamelCase : Optional[int] = _should_continue(_lowerCamelCase , _lowerCamelCase ) and re.search(F"""^{indent}# End copy""" , _lowerCamelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _lowerCamelCase : Optional[int] = lines[start_index:line_index] _lowerCamelCase : List[Any] = "".join(_lowerCamelCase ) # Remove any nested `Copied from` comments to avoid circular copies _lowerCamelCase : Union[str, Any] = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(_lowerCamelCase ) is None] _lowerCamelCase : Optional[Any] = "\n".join(_lowerCamelCase ) # Before comparing, use the `replace_pattern` on the original code. if len(_lowerCamelCase ) > 0: _lowerCamelCase : int = replace_pattern.replace("with" , "" ).split("," ) _lowerCamelCase : Dict = [_re_replace_pattern.search(_lowerCamelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = pattern.groups() _lowerCamelCase : Optional[Any] = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if option.strip() == "all-casing": _lowerCamelCase : Dict = re.sub(obja.lower() , obja.lower() , _lowerCamelCase ) _lowerCamelCase : List[Any] = re.sub(obja.upper() , obja.upper() , _lowerCamelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line _lowerCamelCase : Dict = blackify(lines[start_index - 1] + theoretical_code ) _lowerCamelCase : Any = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: _lowerCamelCase : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:] _lowerCamelCase : List[str] = start_index + 1 if overwrite and len(_lowerCamelCase ) > 0: # Warn the user a file has been modified. print(F"""Detected changes, rewriting {filename}.""" ) with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(_lowerCamelCase ) return diffs def lowerCamelCase_( _lowerCamelCase = False ) -> Dict: '''simple docstring''' _lowerCamelCase : List[Any] = glob.glob(os.path.join(_lowerCamelCase , "**/*.py" ) , recursive=_lowerCamelCase ) _lowerCamelCase : Dict = [] for filename in all_files: _lowerCamelCase : int = is_copy_consistent(_lowerCamelCase , _lowerCamelCase ) diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(_lowerCamelCase ) > 0: _lowerCamelCase : Optional[int] = "\n".join(_lowerCamelCase ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": _lowerCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() check_copies(args.fix_and_overwrite)
46
from __future__ import annotations from collections.abc import Iterator class _A : """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None: __UpperCAmelCase =value __UpperCAmelCase =None __UpperCAmelCase =None class _A : """simple docstring""" def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None: __UpperCAmelCase =tree def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : int ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
68
0
import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) SCREAMING_SNAKE_CASE__ = 50 # max width of layer names SCREAMING_SNAKE_CASE__ = 70 # max width of quantizer names def UpperCAmelCase__ ( lowerCamelCase_ : Dict ): __a : int = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=lowerCamelCase_ , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=lowerCamelCase_ , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=lowerCamelCase_ , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=lowerCamelCase_ , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=lowerCamelCase_ , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=lowerCamelCase_ , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def UpperCAmelCase__ ( lowerCamelCase_ : Dict ): if args.calibrator == "max": __a : str = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) __a : int = 'histogram' elif args.calibrator == "mse": __a : List[str] = 'histogram' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) __a : Optional[Any] = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase_ ) __a : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Dict=False ): logger.info('Configuring Model for Quantization' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCamelCase_ , ['embeddings'] , which='weight' , _disabled=lowerCamelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCamelCase_ , [''] , _disabled=lowerCamelCase_ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCamelCase_ , args.quant_disable_keyword , _disabled=lowerCamelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCamelCase_ , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=lowerCamelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCamelCase_ , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=lowerCamelCase_ ) if args.recalibrate_weights: recalibrate_weights(lowerCamelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCamelCase_ , lowerCamelCase_ ) if args.clip_gelu: clip_gelu(lowerCamelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : str ): logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ): logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ): def fusea(lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] ): for mod in [qq, qk, qv]: if not hasattr(lowerCamelCase_ , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return __a : Any = qq._amax.detach().item() __a : Union[str, Any] = qk._amax.detach().item() __a : int = qv._amax.detach().item() __a : List[Any] = max(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) qq._amax.fill_(lowerCamelCase_ ) qk._amax.fill_(lowerCamelCase_ ) qv._amax.fill_(lowerCamelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ): for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): __a : Dict = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase_ ) __a : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] ): for name, mod in model.named_modules(): if hasattr(lowerCamelCase_ , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: __a : Optional[int] = mod.weight.shape[0] __a : List[str] = mod._weight_quantizer._amax.detach() __a : List[Any] = torch.ones(lowerCamelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ): for name, mod in model.named_modules(): if hasattr(lowerCamelCase_ , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) __a : Tuple = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) __a : str = set(range(len(mod.weight.size() ) ) ) - axis_set __a : Union[str, Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase_ , keepdims=lowerCamelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) __a : Union[str, Any] = amax def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any]=2_5 , lowerCamelCase_ : Dict=1_8_0 , lowerCamelCase_ : int=None ): if ignore is None: __a : Any = [] elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a : int = [ignore] __a : Optional[int] = 0 for name, mod in model.named_modules(): if not hasattr(lowerCamelCase_ , 'weight' ): continue __a : List[str] = max(lowerCamelCase_ , len(lowerCamelCase_ ) ) for name, mod in model.named_modules(): __a : Optional[Any] = getattr(lowerCamelCase_ , '_input_quantizer' , lowerCamelCase_ ) __a : int = getattr(lowerCamelCase_ , '_weight_quantizer' , lowerCamelCase_ ) if not hasattr(lowerCamelCase_ , 'weight' ): continue if type(lowerCamelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCamelCase_ ) is str and s in name]: continue __a : Any = f'''Act:{input_q.extra_repr()}''' __a : str = f'''Wgt:{weight_q.extra_repr()}''' __a : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCamelCase_ ) <= line_width: logger.info(lowerCamelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def UpperCAmelCase__ ( lowerCamelCase_ : List[str] ): __a : Optional[int] = 0 for name, mod in model.named_modules(): if isinstance(lowerCamelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 print(f'''{count} TensorQuantizers found in model''' ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str ): __a : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCamelCase_ , lowerCamelCase_ ) setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def UpperCAmelCase__ ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any="both" , **lowerCamelCase_ : Any ): __a : Union[str, Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCamelCase_ , lowerCamelCase_ , '_input_quantizer' , lowerCamelCase_ , lowerCamelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCamelCase_ , lowerCamelCase_ , '_weight_quantizer' , lowerCamelCase_ , lowerCamelCase_ ) logger.info(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ): for name, mod in model.named_modules(): if hasattr(lowerCamelCase_ , '_input_quantizer' ) or hasattr(lowerCamelCase_ , '_weight_quantizer' ): for n in names: if re.search(lowerCamelCase_ , lowerCamelCase_ ): set_quantizers(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) elif name.endswith('_quantizer' ): for n in names: if re.search(lowerCamelCase_ , lowerCamelCase_ ): __a : Dict = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) logger.info(lowerCamelCase_ )
47
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def lowercase__ ( A_: Union[str, Any] ) -> List[Any]: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) ) __UpperCAmelCase ={ """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(F'''role {role_name} already exists. Using existing one''' ) def lowercase__ ( A_: Dict ) -> Any: """simple docstring""" __UpperCAmelCase =botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =_ask_options( """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , ) __UpperCAmelCase =None if credentials_configuration == 0: __UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" ) __UpperCAmelCase =aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __UpperCAmelCase =_ask_field("""AWS Access Key ID: """ ) __UpperCAmelCase =aws_access_key_id __UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ ) __UpperCAmelCase =aws_secret_access_key __UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" ) __UpperCAmelCase =aws_region __UpperCAmelCase =_ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , ) if role_management == 0: __UpperCAmelCase =_ask_field("""Enter your IAM role name: """ ) else: __UpperCAmelCase ="""accelerate_sagemaker_execution_role""" print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __UpperCAmelCase =_ask_field( """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_custom_docker_image: __UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() ) __UpperCAmelCase =_ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_inputs_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =None if is_sagemaker_metrics_enabled: __UpperCAmelCase =_ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , ) __UpperCAmelCase =_ask_options( """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , ) __UpperCAmelCase ={} __UpperCAmelCase =_ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_dynamo: __UpperCAmelCase ="""dynamo_""" __UpperCAmelCase =_ask_options( """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) __UpperCAmelCase =_ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) if use_custom_options: __UpperCAmelCase =_ask_options( """Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , ) __UpperCAmelCase =_ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase =_ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , ) __UpperCAmelCase ="""Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __UpperCAmelCase =_ask_options( A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" ) __UpperCAmelCase =1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __UpperCAmelCase =_ask_field( """How many machines do you want use? [1]: """ , A_ , default=1 , ) __UpperCAmelCase =_ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
68
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ : Any = { "configuration_time_series_transformer": [ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimeSeriesTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[Any] = [ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Tuple = 'ctrl' lowerCamelCase : Any = ['past_key_values'] lowerCamelCase : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any: __UpperCAmelCase =vocab_size __UpperCAmelCase =n_positions __UpperCAmelCase =n_embd __UpperCAmelCase =n_layer __UpperCAmelCase =n_head __UpperCAmelCase =dff __UpperCAmelCase =resid_pdrop __UpperCAmelCase =embd_pdrop __UpperCAmelCase =layer_norm_epsilon __UpperCAmelCase =initializer_range __UpperCAmelCase =use_cache super().__init__(**__SCREAMING_SNAKE_CASE )
68
0
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar _lowercase : Optional[int] = TypeVar('KT') _lowercase : Optional[int] = TypeVar('VT') class _UpperCAmelCase ( Generic[KT, VT] ): def __init__( self : List[Any] , _lowercase : KT | str = "root" , _lowercase : VT | None = None ): __UpperCAmelCase = key __UpperCAmelCase = value __UpperCAmelCase = [] def __repr__( self : str ): return F'''Node({self.key}: {self.value})''' @property def a ( self : Union[str, Any] ): return len(self.forward ) class _UpperCAmelCase ( Generic[KT, VT] ): def __init__( self : int , _lowercase : float = 0.5 , _lowercase : int = 16 ): __UpperCAmelCase = Node[KT, VT]() __UpperCAmelCase = 0 __UpperCAmelCase = p __UpperCAmelCase = max_level def __str__( self : Dict ): __UpperCAmelCase = list(self ) if len(_lowercase ) == 0: return F'''SkipList(level={self.level})''' __UpperCAmelCase = max((len(str(_lowercase ) ) for item in items) , default=4 ) __UpperCAmelCase = max(_lowercase , 4 ) + 4 __UpperCAmelCase = self.head __UpperCAmelCase = [] __UpperCAmelCase = node.forward.copy() lines.append(F'''[{node.key}]'''.ljust(_lowercase , '''-''' ) + '''* ''' * len(_lowercase ) ) lines.append(''' ''' * label_size + '''| ''' * len(_lowercase ) ) while len(node.forward ) != 0: __UpperCAmelCase = node.forward[0] lines.append( F'''[{node.key}]'''.ljust(_lowercase , '''-''' ) + ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) ) lines.append(''' ''' * label_size + '''| ''' * len(_lowercase ) ) __UpperCAmelCase = node.forward lines.append('''None'''.ljust(_lowercase ) + '''* ''' * len(_lowercase ) ) return F'''SkipList(level={self.level})\n''' + "\n".join(_lowercase ) def __iter__( self : int ): __UpperCAmelCase = self.head while len(node.forward ) != 0: yield node.forward[0].key __UpperCAmelCase = node.forward[0] def a ( self : Optional[Any] ): __UpperCAmelCase = 1 while random() < self.p and level < self.max_level: level += 1 return level def a ( self : Optional[int] , _lowercase : Tuple ): __UpperCAmelCase = [] __UpperCAmelCase = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __UpperCAmelCase = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(_lowercase ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def a ( self : Optional[Any] , _lowercase : KT ): __UpperCAmelCase , __UpperCAmelCase = self._locate_node(_lowercase ) if node is not None: for i, update_node in enumerate(_lowercase ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __UpperCAmelCase = node.forward[i] else: __UpperCAmelCase = update_node.forward[:i] def a ( self : Union[str, Any] , _lowercase : KT , _lowercase : VT ): __UpperCAmelCase , __UpperCAmelCase = self._locate_node(_lowercase ) if node is not None: __UpperCAmelCase = value else: __UpperCAmelCase = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , _lowercase ): update_vector.append(self.head ) __UpperCAmelCase = level __UpperCAmelCase = Node(_lowercase , _lowercase ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(_lowercase ) else: __UpperCAmelCase = new_node def a ( self : List[Any] , _lowercase : VT ): __UpperCAmelCase , __UpperCAmelCase = self._locate_node(_lowercase ) if node is not None: return node.value return None def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) __UpperCAmelCase = skip_list.head __UpperCAmelCase = {} while node.level != 0: __UpperCAmelCase = node.forward[0] __UpperCAmelCase = node.value assert len(snake_case_ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) __UpperCAmelCase = skip_list.head __UpperCAmelCase = {} while node.level != 0: __UpperCAmelCase = node.forward[0] __UpperCAmelCase = node.value if len(snake_case_ ) != 4: print() assert len(snake_case_ ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def lowercase__ ( ): __UpperCAmelCase = SkipList() assert skip_list.find('''Some key''' ) is None def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(snake_case_ :int ): yield node.key for forward_node in node.forward: yield from traverse_keys(snake_case_ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowercase__ ( ): def is_sorted(snake_case_ :Union[str, Any] ): return all(next_item >= item for item, next_item in zip(snake_case_ , lst[1:] ) ) __UpperCAmelCase = SkipList() for i in range(10 ): skip_list.insert(snake_case_ , snake_case_ ) assert is_sorted(list(snake_case_ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(snake_case_ ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(snake_case_ ) ) def lowercase__ ( ): for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowercase__ ( ): __UpperCAmelCase = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
49
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __UpperCAmelCase =k.replace(A_ , A_ ) if k.startswith("""encoder""" ): __UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" ) __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" ) return k def lowercase__ ( A_: Tuple ) -> str: """simple docstring""" __UpperCAmelCase =[ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __UpperCAmelCase =sd.pop(A_ ) __UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __UpperCAmelCase =v __A = ["START"] @torch.no_grad() def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =torch.load(A_ , map_location="""cpu""" ) __UpperCAmelCase =model["""model"""] __UpperCAmelCase =BlenderbotConfig.from_json_file(A_ ) __UpperCAmelCase =BlenderbotForConditionalGeneration(A_ ) __UpperCAmelCase =m.model.state_dict().keys() __UpperCAmelCase =[] __UpperCAmelCase ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue __UpperCAmelCase =rename_state_dict_key(A_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __UpperCAmelCase =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(A_ ) m.model.load_state_dict(A_ , strict=A_ ) m.half() m.save_pretrained(A_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __A = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
68
0
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = (DDIMParallelScheduler,) _UpperCamelCase = (('eta', 0.0), ('num_inference_steps', 50)) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """clip_sample""": True, } config.update(**_lowerCAmelCase ) return config def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = 10, 0.0 lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for t in scheduler.timesteps: lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample return sample def UpperCamelCase_ ( self ): for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowerCAmelCase ) lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(steps_offset=1 ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def UpperCamelCase_ ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase ,beta_end=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,sample_max_value=_lowerCAmelCase ,) def UpperCamelCase_ ( self ): for t in [1, 10, 49]: self.check_over_forward(time_step=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 5_00] ): self.check_over_forward(time_step=_lowerCAmelCase ,num_inference_steps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_lowerCAmelCase ,eta=_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 ,4_00 ) - 0.1_4771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 ,9_60 ) - 0.3_2460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ,4_86 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ,9_98 ) - 0.02 ) ) < 1E-5 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = 10, 0.0 scheduler.set_timesteps(_lowerCAmelCase ) lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter lowerCamelCase__ = self.dummy_sample_deter + 0.1 lowerCamelCase__ = self.dummy_sample_deter - 0.1 lowerCamelCase__ = samplea.shape[0] lowerCamelCase__ = torch.stack([samplea, samplea, samplea] ,dim=0 ) lowerCamelCase__ = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 ,_lowerCAmelCase ) lowerCamelCase__ = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) ) lowerCamelCase__ = scheduler.batch_step_no_noise(_lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,_lowerCAmelCase ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop() lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.22_3967 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop(prediction_type="""v_prediction""" ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def UpperCamelCase_ ( self ): # We specify different beta, so that the first alpha is 0.99 lowerCamelCase__ = self.full_loop(set_alpha_to_one=_lowerCAmelCase ,beta_start=0.01 ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def UpperCamelCase_ ( self ): # We specify different beta, so that the first alpha is 0.99 lowerCamelCase__ = self.full_loop(set_alpha_to_one=_lowerCAmelCase ,beta_start=0.01 ) lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
50
from itertools import permutations def lowercase__ ( A_: tuple ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __UpperCAmelCase =[7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase__ ( A_: int = 10 ) -> int: """simple docstring""" return sum( int("""""".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
68
0
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any: """simple docstring""" if not is_accelerate_available(): return method UpperCAmelCase = version.parse(accelerate.__version__ ).base_version if version.parse(SCREAMING_SNAKE_CASE_ ) < version.parse('''0.17.0''' ): return method def wrapper(self : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Any ): if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ): self._hf_hook.pre_forward(self ) return method(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return wrapper
51
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar("T") def lowercase__ ( A_: int ) -> int: """simple docstring""" return (position - 1) // 2 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 1 def lowercase__ ( A_: int ) -> int: """simple docstring""" return (2 * position) + 2 class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[str] ) -> None: __UpperCAmelCase =[] __UpperCAmelCase ={} __UpperCAmelCase =0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Dict ) -> str: return str(self.heap ) def _a ( self : Optional[int] ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __UpperCAmelCase =self.elements self.elements += 1 self._bubble_up(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __UpperCAmelCase , __UpperCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __UpperCAmelCase , __UpperCAmelCase =self.heap[0] self._bubble_down(__SCREAMING_SNAKE_CASE ) return elem def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Update the weight of the given key __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase =(elem, weight) if position > 0: __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __UpperCAmelCase =self.position_map[elem] if curr_pos == 0: return None __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_up(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __UpperCAmelCase =self.position_map[elem] __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos] __UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements and child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) else: return None if child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) return None def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: # Swap the nodes at the given positions __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase =self.heap[nodea_pos][0] __UpperCAmelCase , __UpperCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) __UpperCAmelCase =nodea_pos __UpperCAmelCase =nodea_pos class _A ( Generic[T] ): """simple docstring""" def __init__( self : List[Any] ) -> None: __UpperCAmelCase ={} __UpperCAmelCase =0 def __repr__( self : Tuple ) -> str: return str(self.connections ) def __len__( self : str ) -> int: return self.nodes def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __UpperCAmelCase ={} self.nodes += 1 def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__SCREAMING_SNAKE_CASE ) self.add_node(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =weight __UpperCAmelCase =weight def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: """simple docstring""" __UpperCAmelCase ={node: maxsize for node in graph.connections} __UpperCAmelCase ={node: None for node in graph.connections} __UpperCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(A_ , A_ ) if priority_queue.is_empty(): return dist, parent # initialization __UpperCAmelCase =priority_queue.extract_min() __UpperCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): __UpperCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(A_ , dist[neighbour] ) __UpperCAmelCase =node return dist, parent
68
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = '''▁''' A = {'''vocab_file''': '''sentencepiece.bpe.model'''} A = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } A = { '''facebook/nllb-200-distilled-600M''': 1_024, } # fmt: off A = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = ['''input_ids''', '''attention_mask'''] __lowerCAmelCase = [] __lowerCAmelCase = [] def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase=False , **_UpperCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it __a : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token __a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs __a : Any = legacy_behaviour super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , ) __a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) __a : Tuple = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token __a : int = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __a : Dict = 1 __a : Optional[Any] = len(self.sp_model ) __a : Dict = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase ) } __a : Dict = {v: k for k, v in self.lang_code_to_id.items()} __a : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __a : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __a : Union[str, Any] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __a : List[str] = src_lang if src_lang is not None else '''eng_Latn''' __a : List[Any] = self.lang_code_to_id[self._src_lang] __a : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): __a : Dict = self.__dict__.copy() __a : Optional[Any] = None __a : int = self.sp_model.serialized_model_proto() return state def __setstate__( self , _UpperCAmelCase ): __a : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a : Dict = {} __a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _lowerCamelCase ( self ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _lowerCamelCase ( self ): return self._src_lang @src_lang.setter def _lowerCamelCase ( self , _UpperCAmelCase ): __a : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) __a : List[Any] = [1] * len(self.prefix_tokens ) __a : List[str] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __a : Dict = [self.sep_token_id] __a : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __a : List[str] = src_lang __a : Dict = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) __a : Any = self.convert_tokens_to_ids(_UpperCAmelCase ) __a : int = tgt_lang_id return inputs def _lowerCamelCase ( self ): __a : Tuple = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , _UpperCAmelCase ): return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __a : Tuple = self.sp_model.PieceToId(_UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , _UpperCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : int = ''''''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: __a : int = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = "eng_Latn" , _UpperCAmelCase = None , _UpperCAmelCase = "fra_Latn" , **_UpperCAmelCase , ): __a : Optional[int] = src_lang __a : Tuple = tgt_lang return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) def _lowerCamelCase ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def _lowerCamelCase ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : List[str] = self.lang_code_to_id[src_lang] if self.legacy_behaviour: __a : Any = [] __a : Dict = [self.eos_token_id, self.cur_lang_code] else: __a : Dict = [self.cur_lang_code] __a : Optional[Any] = [self.eos_token_id] def _lowerCamelCase ( self , _UpperCAmelCase ): __a : Dict = self.lang_code_to_id[lang] if self.legacy_behaviour: __a : List[Any] = [] __a : str = [self.eos_token_id, self.cur_lang_code] else: __a : List[str] = [self.cur_lang_code] __a : Union[str, Any] = [self.eos_token_id]
52
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __A = logging.get_logger(__name__) @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase =deprecated_arg[3:] __UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name ) __UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx ) __UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode ) __UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCamelCase : str = field( default=UpperCamelCase , metadata={'help': 'Name of TPU'} , ) lowerCamelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} ) lowerCamelCase : bool = field( default=UpperCamelCase , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) __UpperCAmelCase =None if self.tpu: try: if self.tpu_name: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase =None return tpu @cached_property def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU __UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ) -> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def _a ( self : str ) -> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def _a ( self : Dict ) -> Optional[int]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def _a ( self : List[str] ) -> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[str] ) -> bool: return self.n_gpu > 0
68
0
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def a_ ( lowerCAmelCase_ : str=None, lowerCAmelCase_ : Any=None ): return field(default_factory=lambda: default, metadata=lowerCAmelCase_ ) @dataclass class _UpperCAmelCase : """simple docstring""" a_ = field( metadata={"""help""": """The csv file to plot."""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Disable logarithmic scale when plotting"""} , ) a_ = field( default=_UpperCamelCase , metadata={ """help""": """Whether the csv file has training results or inference results. Defaults to inference results.""" } , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , ) a_ = list_field( default=_UpperCamelCase , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} ) def a_ ( lowerCAmelCase_ : Tuple ): try: int(lowerCAmelCase_ ) return True except ValueError: return False def a_ ( lowerCAmelCase_ : Dict ): try: float(lowerCAmelCase_ ) return True except ValueError: return False class _UpperCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[Any] ) -> Union[str, Any]: __lowerCAmelCase = args __lowerCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: __lowerCAmelCase = csv.DictReader(lowerCAmelCase_ ) for row in reader: __lowerCAmelCase = row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None __lowerCAmelCase = int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None __lowerCAmelCase = float(row['result'] ) def lowercase ( self : List[Any] ) -> Tuple: __lowerCAmelCase , __lowerCAmelCase = plt.subplots() __lowerCAmelCase = 'Time usage' if self.args.is_time else 'Memory usage' __lowerCAmelCase = title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): __lowerCAmelCase = sorted(set(self.result_dict[model_name]['bsz'] ) ) __lowerCAmelCase = sorted(set(self.result_dict[model_name]['seq_len'] ) ) __lowerCAmelCase = self.result_dict[model_name]['result'] ((__lowerCAmelCase) , (__lowerCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowerCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowerCAmelCase = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase_ , ) else: __lowerCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowerCAmelCase) , (__lowerCAmelCase)) = ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowerCAmelCase = np.asarray(lowerCAmelCase_ , lowerCAmelCase_ )[: len(lowerCAmelCase_ )] plt.scatter( lowerCAmelCase_ , lowerCAmelCase_ , label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" ) plt.plot(lowerCAmelCase_ , lowerCAmelCase_ , '--' ) title_str += f""" {label_model_name} vs.""" __lowerCAmelCase = title_str[:-4] __lowerCAmelCase = 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase_ ) plt.xlabel(lowerCAmelCase_ ) plt.ylabel(lowerCAmelCase_ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def a_ ( ): __lowerCAmelCase = HfArgumentParser(lowerCAmelCase_ ) __lowerCAmelCase = parser.parse_args_into_dataclasses()[0] __lowerCAmelCase = Plot(args=lowerCAmelCase_ ) plot.plot() if __name__ == "__main__": main()
53
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Dict: torch.manual_seed(0 ) __UpperCAmelCase =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _a ( self : int ) -> Union[str, Any]: __UpperCAmelCase =self.dummy_uncond_unet __UpperCAmelCase =ScoreSdeVeScheduler() __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[ 0 ] __UpperCAmelCase =image[0, -3:, -3:, -1] __UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _A ( unittest.TestCase ): """simple docstring""" def _a ( self : Optional[int] ) -> int: __UpperCAmelCase ="""google/ncsnpp-church-256""" __UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.manual_seed(0 ) __UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
68
0
def a__ ( lowercase__ , lowercase__ ): '''simple docstring''' UpperCAmelCase_ ="" for i in table: res += inp[i - 1] return res def a__ ( lowercase__ ): '''simple docstring''' return data[1:] + data[0] def a__ ( lowercase__ , lowercase__ ): '''simple docstring''' UpperCAmelCase_ ="" for i in range(len(lowercase__ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def a__ ( lowercase__ , lowercase__ ): '''simple docstring''' UpperCAmelCase_ =int("0b" + data[0] + data[-1] , 2 ) UpperCAmelCase_ =int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' UpperCAmelCase_ =message[:4] UpperCAmelCase_ =message[4:] UpperCAmelCase_ =apply_table(lowercase__ , lowercase__ ) UpperCAmelCase_ =xor(lowercase__ , lowercase__ ) UpperCAmelCase_ =apply_sbox(lowercase__ , temp[:4] ) # noqa: E741 UpperCAmelCase_ =apply_sbox(lowercase__ , temp[4:] ) UpperCAmelCase_ ="0" * (2 - len(lowercase__ )) + l # noqa: E741 UpperCAmelCase_ ="0" * (2 - len(lowercase__ )) + r UpperCAmelCase_ =apply_table(l + r , lowercase__ ) UpperCAmelCase_ =xor(lowercase__ , lowercase__ ) return temp + right if __name__ == "__main__": __lowercase : Union[str, Any] =input("""Enter 10 bit key: """) __lowercase : Optional[Any] =input("""Enter 8 bit message: """) __lowercase : str =[6, 3, 7, 4, 8, 5, 10, 9] __lowercase : List[Any] =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6] __lowercase : Dict =[2, 4, 3, 1] __lowercase : Optional[Any] =[2, 6, 3, 1, 4, 8, 5, 7] __lowercase : int =[4, 1, 3, 5, 7, 2, 8, 6] __lowercase : Any =[4, 1, 2, 3, 2, 3, 4, 1] __lowercase : int =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __lowercase : List[str] =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __lowercase : List[str] =apply_table(key, paa_table) __lowercase : Dict =temp[:5] __lowercase : List[Any] =temp[5:] __lowercase : Union[str, Any] =left_shift(left) __lowercase : Optional[Any] =left_shift(right) __lowercase : Tuple =apply_table(left + right, pa_table) __lowercase : List[str] =left_shift(left) __lowercase : List[str] =left_shift(right) __lowercase : int =left_shift(left) __lowercase : Any =left_shift(right) __lowercase : Tuple =apply_table(left + right, pa_table) # encryption __lowercase : Union[str, Any] =apply_table(message, IP) __lowercase : Dict =function(expansion, sa, sa, keya, temp) __lowercase : Dict =temp[4:] + temp[:4] __lowercase : str =function(expansion, sa, sa, keya, temp) __lowercase : Tuple =apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption __lowercase : List[str] =apply_table(CT, IP) __lowercase : str =function(expansion, sa, sa, keya, temp) __lowercase : int =temp[4:] + temp[:4] __lowercase : int =function(expansion, sa, sa, keya, temp) __lowercase : Optional[int] =apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
54
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _A ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if config is None: assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) __UpperCAmelCase =self.model.config else: __UpperCAmelCase =config __UpperCAmelCase =data_args __UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: __UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __UpperCAmelCase =label_smoothed_nll_loss def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any: if self.optimizer is None: __UpperCAmelCase =["""bias""", """LayerNorm.weight"""] __UpperCAmelCase =[ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] __UpperCAmelCase =Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __UpperCAmelCase =Adafactor __UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False} else: __UpperCAmelCase =AdamW __UpperCAmelCase ={ """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } __UpperCAmelCase =self.args.learning_rate if self.sharded_ddp: __UpperCAmelCase =OSS( params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.lr_scheduler is None: __UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __UpperCAmelCase =schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __UpperCAmelCase =schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) return scheduler def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2] else: # compute label smoothed loss __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0] __UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 ) __UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: __UpperCAmelCase =inputs.pop("""labels""" ) __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return loss def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: __UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __UpperCAmelCase =self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) __UpperCAmelCase =inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data __UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: # If PAD token is not defined at least EOS token has to be defined __UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) __UpperCAmelCase =pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __UpperCAmelCase =tensor return padded_tensor
68
0
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) class UpperCAmelCase : '''simple docstring''' def __init__( self : Any ,A : str ,A : Any ): __A = question_encoder __A = generator __A = self.question_encoder def UpperCamelCase_ ( self : Dict ,A : List[Any] ): if os.path.isfile(A ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(A ,exist_ok=A ) __A = os.path.join(A ,"question_encoder_tokenizer" ) __A = os.path.join(A ,"generator_tokenizer" ) self.question_encoder.save_pretrained(A ) self.generator.save_pretrained(A ) @classmethod def UpperCamelCase_ ( cls : Any ,A : str ,**A : Optional[Any] ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer __A = kwargs.pop("config" ,A ) if config is None: __A = RagConfig.from_pretrained(A ) __A = AutoTokenizer.from_pretrained( A ,config=config.question_encoder ,subfolder="question_encoder_tokenizer" ) __A = AutoTokenizer.from_pretrained( A ,config=config.generator ,subfolder="generator_tokenizer" ) return cls(question_encoder=A ,generator=A ) def __call__( self : Tuple ,*A : List[str] ,**A : str ): return self.current_tokenizer(*A ,**A ) def UpperCamelCase_ ( self : List[str] ,*A : List[str] ,**A : List[str] ): return self.generator.batch_decode(*A ,**A ) def UpperCamelCase_ ( self : int ,*A : int ,**A : Union[str, Any] ): return self.generator.decode(*A ,**A ) def UpperCamelCase_ ( self : Dict ): __A = self.question_encoder def UpperCamelCase_ ( self : Any ): __A = self.generator def UpperCamelCase_ ( self : Optional[Any] ,A : List[str] ,A : Optional[List[str]] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : str = "longest" ,A : str = None ,A : bool = True ,**A : Tuple ,): warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. See the documentation of your specific tokenizer for more " "details" ,A ,) if max_length is None: __A = self.current_tokenizer.model_max_length __A = self( A ,add_special_tokens=A ,return_tensors=A ,max_length=A ,padding=A ,truncation=A ,**A ,) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: __A = self.current_tokenizer.model_max_length __A = self( text_target=A ,add_special_tokens=A ,return_tensors=A ,padding=A ,max_length=A ,truncation=A ,**A ,) __A = labels["input_ids"] return model_inputs
55
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any: __UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Optional[Any] ) -> int: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Optional[Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) # warmup pass to apply optimizations __UpperCAmelCase =pipe(**self.get_dummy_inputs() ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _a ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_dummy_inputs() __UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images __UpperCAmelCase =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" @property def _a ( self : List[str] ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Dict ) -> int: __UpperCAmelCase =ort.SessionOptions() __UpperCAmelCase =False return options def _a ( self : Dict ) -> Any: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _a ( self : List[str] ) -> str: __UpperCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase =init_image.resize((768, 512) ) __UpperCAmelCase =LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""A fantasy landscape, trending on artstation""" __UpperCAmelCase =np.random.RandomState(0 ) __UpperCAmelCase =pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __UpperCAmelCase =output.images __UpperCAmelCase =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
68
0
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _lowercase ( pl.LightningModule ): def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> str: super().__init__() __snake_case = model __snake_case = 2 __snake_case = nn.Linear(self.model.config.hidden_size , self.num_labels ) def a ( self : int ) -> Tuple: pass def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> int: """simple docstring""" # load longformer model from model identifier __snake_case = LongformerModel.from_pretrained(lowercase__ ) __snake_case = LightningModel(lowercase__ ) __snake_case = torch.load(lowercase__ , map_location=torch.device('cpu' ) ) lightning_model.load_state_dict(ckpt['state_dict'] ) # init longformer question answering model __snake_case = LongformerForQuestionAnswering.from_pretrained(lowercase__ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(lowercase__ ) print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--longformer_model", default=None, type=str, required=True, help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.", ) parser.add_argument( "--longformer_question_answering_ckpt_path", default=None, type=str, required=True, help="Path the official PyTorch Lightning Checkpoint.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _a : Union[str, Any] = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
56
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'sequence-classification' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: if type(__SCREAMING_SNAKE_CASE ) == dict: __UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =glue_output_modes[hparams.task] __UpperCAmelCase =glue_tasks_num_labels[hparams.task] super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode ) def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: return self.model(**__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs[0] __UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""] __UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _a ( self : Tuple ) -> List[Any]: __UpperCAmelCase =self.hparams __UpperCAmelCase =processors[args.task]() __UpperCAmelCase =processor.get_labels() for mode in ["train", "dev"]: __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __UpperCAmelCase =( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) __UpperCAmelCase =convert_examples_to_features( __SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader: __UpperCAmelCase ="""dev""" if mode == """test""" else mode __UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE ) logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str: __UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None __UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase , __UpperCAmelCase =outputs[:2] __UpperCAmelCase =logits.detach().cpu().numpy() __UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple: __UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() __UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 ) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )] __UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} __UpperCAmelCase =dict(results.items() ) __UpperCAmelCase =results return ret, preds_list, out_label_list def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) parser.add_argument( """--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def lowercase__ ( ) -> str: """simple docstring""" __UpperCAmelCase =argparse.ArgumentParser() add_generic_args(A_ , os.getcwd() ) __UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() ) __UpperCAmelCase =parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __UpperCAmelCase =os.path.join( """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __UpperCAmelCase =GLUETransformer(A_ ) __UpperCAmelCase =generic_train(A_ , A_ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) ) __UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(A_ ) if __name__ == "__main__": main()
68
0
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A_ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" a : str =PegasusTokenizer a : List[Any] =PegasusTokenizerFast a : Optional[Any] =True a : List[Any] =True def _a ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase_: List[Any] = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _a ( self ): return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def _a ( self , **_lowerCamelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self , _lowerCamelCase ): return ("This is a test", "This is a test") def _a ( self ): UpperCamelCase_: Optional[Any] = '</s>' UpperCamelCase_: Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self ): UpperCamelCase_: Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(_lowerCamelCase ) , 1_1_0_3 ) def _a ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def _a ( self ): UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase_: Dict = self.tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase_: int = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) UpperCamelCase_: Any = rust_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] UpperCamelCase_: int = py_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self ): UpperCamelCase_: Dict = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word UpperCamelCase_: int = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' UpperCamelCase_: Optional[int] = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] UpperCamelCase_: Dict = tokenizer([raw_input_str] , return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self ): UpperCamelCase_: List[Any] = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 UpperCamelCase_: Union[str, Any] = 'To ensure a smooth flow of bank resolutions.' UpperCamelCase_: List[Any] = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] UpperCamelCase_: Optional[Any] = tokenizer([raw_input_str] , return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _a ( self ): UpperCamelCase_: Optional[int] = ['This is going to be way too long.' * 1_5_0, 'short example'] UpperCamelCase_: Optional[Any] = ['not super long but more than 5 tokens', 'tiny'] UpperCamelCase_: Union[str, Any] = self._large_tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' ) UpperCamelCase_: int = self._large_tokenizer( text_target=_lowerCamelCase , max_length=5 , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _a ( self ): # fmt: off UpperCamelCase_: Optional[Any] = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" a : Optional[int] =PegasusTokenizer a : int =PegasusTokenizerFast a : List[str] =True a : List[Any] =True def _a ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase_: Optional[int] = PegasusTokenizer(_lowerCamelCase , offset=0 , mask_token_sent=_lowerCamelCase , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _a ( self ): return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def _a ( self , **_lowerCamelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self , _lowerCamelCase ): return ("This is a test", "This is a test") def _a ( self ): UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase_: Any = self.tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase_: Tuple = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) UpperCamelCase_: str = rust_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] UpperCamelCase_: Dict = py_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) @require_torch def _a ( self ): UpperCamelCase_: List[str] = ['This is going to be way too long.' * 1_0_0_0, 'short example'] UpperCamelCase_: Optional[Any] = ['not super long but more than 5 tokens', 'tiny'] UpperCamelCase_: Dict = self._large_tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' ) UpperCamelCase_: Optional[int] = self._large_tokenizer( text_target=_lowerCamelCase , max_length=5 , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _a ( self ): UpperCamelCase_: Optional[Any] = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) UpperCamelCase_: int = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
57
def lowercase__ ( A_: int , A_: int ) -> int: """simple docstring""" return 1 if input_a == input_a else 0 def lowercase__ ( ) -> None: """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
68
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Optional[Any] = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Dict = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
from __future__ import annotations import bisect def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] < item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int: """simple docstring""" if hi < 0: __UpperCAmelCase =len(A_ ) while lo < hi: __UpperCAmelCase =lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __UpperCAmelCase =mid + 1 else: __UpperCAmelCase =mid return lo def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None: """simple docstring""" sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ ) def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =0 __UpperCAmelCase =len(A_ ) - 1 while left <= right: __UpperCAmelCase =left + (right - left) // 2 __UpperCAmelCase =sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __UpperCAmelCase =midpoint - 1 else: __UpperCAmelCase =midpoint + 1 return None def lowercase__ ( A_: list[int] , A_: int ) -> int | None: """simple docstring""" __UpperCAmelCase =bisect.bisect_left(A_ , A_ ) if index != len(A_ ) and sorted_collection[index] == item: return index return None def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None: """simple docstring""" if right < left: return None __UpperCAmelCase =left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 ) else: return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ ) if __name__ == "__main__": __A = input("Enter numbers separated by comma:\n").strip() __A = sorted(int(item) for item in user_input.split(",")) __A = int(input("Enter a single number to be found in the list:\n")) __A = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
68
0
import inspect import unittest class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[Any]: '''simple docstring''' try: import diffusers # noqa: F401 except ImportError: assert False def SCREAMING_SNAKE_CASE_ (self : int) ->str: '''simple docstring''' import diffusers from diffusers.dependency_versions_table import deps lowerCamelCase__: Optional[Any] =inspect.getmembers(UpperCAmelCase_ , inspect.isclass) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": lowerCamelCase__: Tuple ="k-diffusion" elif backend == "invisible_watermark": lowerCamelCase__: List[Any] ="invisible-watermark" assert backend in deps, F"""{backend} is not in the deps table!"""
59
from typing import List from .keymap import KEYMAP, get_character def lowercase__ ( A_: str ) -> str: """simple docstring""" def decorator(A_: int ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += [key] setattr(A_ , """handle_key""" , A_ ) return func return decorator def lowercase__ ( *A_: List[str] ) -> Optional[int]: """simple docstring""" def decorator(A_: Tuple ): __UpperCAmelCase =getattr(A_ , """handle_key""" , [] ) handle += keys setattr(A_ , """handle_key""" , A_ ) return func return decorator class _A ( UpperCamelCase ): """simple docstring""" def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int: __UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ): setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} ) setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): __UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] ) for key in handled_keys: __UpperCAmelCase =value return new_cls @staticmethod def _a ( cls : Dict ) -> List[Any]: __UpperCAmelCase =get_character() if char != KEYMAP["undefined"]: __UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE ) if handler: __UpperCAmelCase =char return handler(cls ) else: return None def lowercase__ ( cls: str ) -> int: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
68
0